text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Optional first CLI argument: path to the documentation driver directory,
# used as a prefix when globbing for database files below.
DriverPath = ''
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
# Make modules in the current working directory importable.
sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
    """Announce on stdout which file is being auto-documented."""
    print('Auto-documenting {0} file {1}'.format(category, pyfile))
# Available databases in psi4/share/psi4/databases
# Write one reST stanza per database module into the generated include file.
fdriver = open('source/autodoc_available_databases.rst', 'w')
fdriver.write('\n\n')
for pyfile in glob.glob(DriverPath + '../../psi4/share/psi4/databases/*.py'):
    filename = os.path.split(pyfile)[1]
    basename = os.path.splitext(filename)[0]
    # NOTE(review): `div` is computed but never used below.
    div = '=' * len(basename)
    if basename not in ['input']:
        pts('database', basename)
        # Underline length accounts for the ':srcdb:`' role markup (9 chars).
        fdriver.write(':srcdb:`%s`\n%s\n\n' % (basename, '"' * (9 + len(basename))))
        fdriver.write('.. automodule:: %s\n\n' % (basename))
        fdriver.write('----\n')
fdriver.write('\n')
fdriver.close()
|
amjames/psi4
|
doc/sphinxman/document_databases.py
|
Python
|
lgpl-3.0
| 1,819
|
[
"Psi4"
] |
b72929d5f6bcd84ed538eaf1d6fa2dda56b9085d53d4743dc2302c0f42c4e285
|
# Authors : Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License : BSD 3-clause
from copy import deepcopy
import math
import numpy as np
from scipy import fftpack
# XXX explore cuda optimazation at some point.
from ..io.pick import pick_types, pick_info
from ..utils import verbose, warn
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function."""
# flatten to 2 D and memorize original shape
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
if n_times < n_fft:
warn('The input signal is shorter ({0}) than "n_fft" ({1}). '
'Applying zero padding.'.format(x_in.shape[-1], n_fft))
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
else:
zero_pad = 0
return x_in, n_fft, zero_pad
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell gausian windows (in the freq domain)."""
tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
k = width # 1 for classical stowckwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fftpack.fft(window)
return windows
def _st(x, start_f, windows):
"""Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
# do the work
Fx = fftpack.fft(x)
XF = np.concatenate([Fx, Fx], axis=-1)
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
return ST
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function."""
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fftpack.fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
if zero_pad > 0:
TFR = ST[:, :-zero_pad:decim]
else:
TFR = ST[:, ::decim]
TFR_abs = np.abs(TFR)
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None,
                        width=1.0, decim=1, return_itc=False, n_jobs=1):
    """Compute power and intertrial coherence using Stockwell (S) transform.

    See [1]_, [2]_, [3]_, [4]_ for more information.

    Parameters
    ----------
    data : ndarray
        The signal to transform. Any dimensionality supported as long
        as the last dimension is time.
    sfreq : float
        The sampling frequency.
    fmin : None, float
        The minimum frequency to include. If None defaults to the minimum fft
        frequency greater than zero.
    fmax : None, float
        The maximum frequency to include. If None defaults to the maximum fft.
    n_fft : int | None
        The length of the windows used for FFT. If None, it defaults to the
        next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. If < 1, increased temporal
        resolution, if > 1, increased frequency resolution. Defaults to 1.
        (classical S-Transform).
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    n_jobs : int
        Number of parallel jobs to use.

    Returns
    -------
    st_power : ndarray
        The multitaper power of the Stockwell transformed data.
        The last two dimensions are frequency and time.
    itc : ndarray
        The intertrial coherence. Only returned if return_itc is True.
    freqs : ndarray
        The frequencies.

    References
    ----------
    .. [1] Stockwell, R. G. "Why use the S-transform." AMS Pseudo-differential
       operators: Partial differential equations and time-frequency
       analysis 52 (2007): 279-309.
    .. [2] Moukadem, A., Bouguila, Z., Abdeslam, D. O, and Dieterlen, A.
       Stockwell transform optimization applied on the detection of split in
       heart sounds (2014). Signal Processing Conference (EUSIPCO), 2013
       Proceedings of the 22nd European, pages 2015--2019.
    .. [3] Wheat, K., Cornelissen, P. L., Frost, S.J, and Peter C. Hansen
       (2010). During Visual Word Recognition, Phonology Is Accessed
       within 100 ms and May Be Mediated by a Speech Production
       Code: Evidence from Magnetoencephalography. The Journal of
       Neuroscience, 30 (15), 5229-5233.
    .. [4] K. A. Jones and B. Porjesz and D. Chorlian and M. Rangaswamy and C.
       Kamarajan and A. Padmanabhapillai and A. Stimus and H. Begleiter
       (2006). S-transform time-frequency analysis of P300 reveals deficits in
       individuals diagnosed with alcoholism.
       Clinical Neurophysiology 117 2128--2143

    See Also
    --------
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet
    """
    # data is expected as (n_epochs, n_channels, n_times) here.
    n_epochs, n_channels = data.shape[:2]
    # Output length after decimation of the *unpadded* signal; matches the
    # n_out computed inside _st_power_itc.
    n_out = data.shape[2] // decim + bool(data.shape[2] % decim)
    data, n_fft_, zero_pad = _check_input_st(data, n_fft)
    freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
    if fmin is None:
        fmin = freqs[freqs > 0][0]
    if fmax is None:
        fmax = freqs.max()
    # Frequency-bin indices closest to the requested band edges.
    start_f = np.abs(freqs - fmin).argmin()
    stop_f = np.abs(freqs - fmax).argmin()
    freqs = freqs[start_f:stop_f]
    W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
    n_freq = stop_f - start_f
    psd = np.empty((n_channels, n_freq, n_out))
    itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
    # Parallelize over channels.
    parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
    tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
                          decim, W)
                    for c in range(n_channels))
    for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
        psd[c] = this_psd
        if this_itc is not None:
            itc[c] = this_itc
    return psd, itc, freqs
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
                  width=1.0, decim=1, return_itc=False, n_jobs=1,
                  verbose=None):
    """Time-Frequency Representation (TFR) using Stockwell Transform.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    fmin : None, float
        The minimum frequency to include. If None defaults to the minimum fft
        frequency greater than zero.
    fmax : None, float
        The maximum frequency to include. If None defaults to the maximum fft.
    n_fft : int | None
        The length of the windows used for FFT. If None, it defaults to the
        next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. If < 1, increased temporal
        resolution, if > 1, increased frequency resolution. Defaults to 1.
        (classical S-Transform).
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    n_jobs : int
        The number of jobs to run in parallel (over channels).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence. Only returned if return_itc is True.

    See Also
    --------
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # verbose dec is used b/c subfunctions are verbose
    data = _get_data(inst, return_itc)
    # Restrict to MEG/EEG channels before transforming.
    picks = pick_types(inst.info, meg=True, eeg=True)
    info = pick_info(inst.info, picks)
    data = data[:, picks, :]
    n_jobs = check_n_jobs(n_jobs)
    power, itc, freqs = tfr_array_stockwell(data, sfreq=info['sfreq'],
                                            fmin=fmin, fmax=fmax, n_fft=n_fft,
                                            width=width, decim=decim,
                                            return_itc=return_itc,
                                            n_jobs=n_jobs)
    # Time axis is decimated the same way as the TFR output.
    times = inst.times[::decim].copy()
    nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
    if return_itc:
        # Deep-copy info/times so the two AverageTFR objects are independent.
        out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
                               freqs.copy(), nave, method='stockwell-itc'))
    return out
|
nicproulx/mne-python
|
mne/time_frequency/_stockwell.py
|
Python
|
bsd-3-clause
| 10,146
|
[
"Gaussian"
] |
2d32f72d958bd5cd671bce8aa0eb46338e4428c1867c4109bb03ba55505235f7
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gi.repository import Gtk
from gi.repository import Pango, PangoCairo
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, FONT_SERIF, PARA_ALIGN_RIGHT,
FONT_SANS_SERIF, FONT_MONOSPACE, PARA_ALIGN_CENTER,
PARA_ALIGN_LEFT)
from ...managedwindow import ManagedWindow
# Default font-map resolution (dots per inch) of the running display.
RESOLUTION = PangoCairo.font_map_get_default().get_resolution()

def pixels(cm):
    # Convert a length in centimeters to device pixels (2.54 cm per inch).
    return int (RESOLUTION/2.54 * cm)

#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
LEFT,RIGHT,CENTER = 'LEFT','RIGHT','CENTER'
# Nominal page width used by get_usable_width(), in characters.
_WIDTH_IN_CHARS = 72
class DisplayBuf(ManagedWindow):
    """Managed dialog window that displays a quick-view text buffer
    inside a scrolled Gtk.TextView."""

    def __init__(self, title, document):
        self.title = title
        ManagedWindow.__init__(self, document.uistate, [],
                               document)
        self.set_window(Gtk.Dialog("",document.uistate.window,
                                   Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                   (Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)),
                        None, title)
        self.window.set_size_request(600,400)
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,Gtk.PolicyType.AUTOMATIC)
        # The dialog owns the view; the document supplies the buffer.
        document.text_view = Gtk.TextView()
        document.text_view.set_buffer(document.buffer)
        # Any dialog response (Close) dismisses the window.
        self.window.connect('response', self.close)
        scrolled_window.add(document.text_view)
        self.window.vbox.pack_start(scrolled_window, True, True, 0)
        self.window.show_all()

    def build_menu_names(self, obj):
        # (menu label, window title) pair used by ManagedWindow.
        return ('View', _('Quick View'))

    def get_title(self):
        return self.title
class DocumentManager(object):
    """Attach a quick-view document to an externally supplied text view.

    Unlike DisplayBuf, no window is created: the caller provides the
    Gtk.TextView and this class simply wires it to the document's buffer.
    """

    def __init__(self, title, document, text_view):
        self.title = title
        self.document = document
        # Connect the supplied view to the document and its buffer.
        document.text_view = text_view
        text_view.set_buffer(document.buffer)
#------------------------------------------------------------------------
#
# TextBuf
#
#------------------------------------------------------------------------
class TextBufDoc(BaseDoc, TextDoc):
    """Render report output into a Gtk.TextBuffer for on-screen quick views.

    NOTE(review): several table/media methods below begin with a bare
    ``return``; the code after it (which references ``self.f``) is dead
    code inherited from a plain-text backend and is intentionally disabled.
    """

    #--------------------------------------------------------------------
    #
    # Opens the file, resets the text buffer.
    #
    #--------------------------------------------------------------------
    def open(self, filename, container=None):
        # Build one Gtk.TextTag per paragraph style in the style sheet,
        # translating font, alignment, margins and tab stops.
        self.has_data = True
        self.tag_table = Gtk.TextTagTable()
        sheet = self.get_style_sheet()
        for name in sheet.get_paragraph_style_names():
            tag = Gtk.TextTag(name=name)
            style = sheet.get_paragraph_style(name)
            font = style.get_font()
            if font.get_type_face() == FONT_SERIF:
                tag.set_property("family", "Serif")
            elif font.get_type_face() == FONT_SANS_SERIF:
                tag.set_property("family", "Sans")
            elif font.get_type_face() == FONT_MONOSPACE:
                tag.set_property("family", "MonoSpace")
            tag.set_property("size-points", float(font.get_size()))
            if font.get_bold():
                tag.set_property("weight", Pango.Weight.BOLD)
            if style.get_alignment() == PARA_ALIGN_RIGHT:
                tag.set_property("justification", Gtk.Justification.RIGHT)
            elif style.get_alignment() == PARA_ALIGN_LEFT:
                tag.set_property("justification", Gtk.Justification.LEFT)
            elif style.get_alignment() == PARA_ALIGN_CENTER:
                tag.set_property("justification", Gtk.Justification.CENTER)
            else:
                tag.set_property("justification", Gtk.Justification.FILL)
            if font.get_italic():
                tag.set_property("style", Pango.Style.ITALIC)
            if style.get_first_indent():
                tag.set_property("indent", pixels(style.get_first_indent()))
                #tag.set_property("tabs", [pixels(abs(style.get_first_indent()))])
            tag.set_property("left-margin", pixels(style.get_left_margin()))
            tag.set_property("right-margin", pixels(style.get_right_margin()))
            tag.set_property("pixels-above-lines", pixels(style.get_top_margin()))
            tag.set_property("pixels-below-lines", pixels(style.get_bottom_margin()))
            tag.set_property("wrap-mode", Gtk.WrapMode.WORD)
            new_tabs = style.get_tabs()
            tab_array = Pango.TabArray.new(initial_size=len(new_tabs)+1,
                                           positions_in_pixels=True)
            index = 0
            for tab in map(pixels, new_tabs):
                tab_array.set_tab(index, Pango.TabAlign.LEFT, tab)
                index += 1
            tag.set_property("tabs", tab_array)
            self.tag_table.add(tag)
        self.buffer = Gtk.TextBuffer.new(self.tag_table)
        # With a container, hand back a manager wired to that view;
        # otherwise pop up a stand-alone quick-view dialog.
        if container:
            return DocumentManager(_('Quick View'), self, container)
        else:
            DisplayBuf(_('Quick View'), self)
            return

    #--------------------------------------------------------------------
    #
    # Close the file. Call the app if required.
    #
    #--------------------------------------------------------------------
    def close(self):
        pass

    def get_usable_width(self):
        # Nominal page width, in characters.
        return _WIDTH_IN_CHARS

    #--------------------------------------------------------------------
    #
    # Force a section page break
    #
    #--------------------------------------------------------------------
    def end_page(self):
        return
        # Dead code (legacy text-file backend):
        self.f.write('\012')

    def start_bold(self):
        pass

    def end_bold(self):
        pass

    def page_break(self):
        pass

    def start_superscript(self):
        return

    def end_superscript(self):
        return

    #--------------------------------------------------------------------
    #
    # Starts a paragraph.
    #
    #--------------------------------------------------------------------
    def start_paragraph(self, style_name, leader=None):
        # Remember the style and start accumulating the paragraph text.
        self.style_name = style_name
        if leader:
            self.text = leader + "\t"
        else:
            self.text = ""

    #--------------------------------------------------------------------
    #
    # End a paragraph. First format it to the desired widths.
    # If not in table cell, write it immediately. If in the cell,
    # add it to the list for this cell after formatting.
    #
    #--------------------------------------------------------------------
    def end_paragraph(self):
        # Flush the accumulated text into the buffer, tagged with the
        # paragraph style so the tags created in open() apply.
        self.buffer.insert_with_tags_by_name(
            self.buffer.get_end_iter(),
            self.text + "\n",
            self.style_name)

    #--------------------------------------------------------------------
    #
    # Start a table. Grab the table style, and store it.
    #
    #--------------------------------------------------------------------
    def start_table(self, name,style_name):
        return
        # Dead code (legacy text-file backend):
        styles = self.get_style_sheet()
        self.tbl_style = styles.get_table_style(style_name)
        self.ncols = self.tbl_style.get_columns()

    #--------------------------------------------------------------------
    #
    # End a table. Turn off the self.in_cell flag
    #
    #--------------------------------------------------------------------
    def end_table(self):
        return
        # Dead code (legacy text-file backend):
        self.in_cell = 0

    #--------------------------------------------------------------------
    #
    # Start a row. Initialize lists for cell contents, number of lines,
    # and the widths. It is necessary to keep a list of cell contents
    # that is to be written after all the cells are defined.
    #
    #--------------------------------------------------------------------
    def start_row(self):
        return
        # Dead code (legacy text-file backend):
        self.cellpars = [''] * self.ncols
        self.cell_lines = [0] * self.ncols
        self.cell_widths = [0] * self.ncols
        self.cellnum = -1
        self.maxlines = 0
        table_width = self.get_usable_width() * self.tbl_style.get_width() / 100.0
        for cell in range(self.ncols):
            self.cell_widths[cell] = int( table_width * \
                self.tbl_style.get_column_width(cell) / 100.0 )

    #--------------------------------------------------------------------
    #
    # End a row. Write the cell contents. Write the line of spaces
    # if the cell has fewer lines than the maximum number.
    #
    #--------------------------------------------------------------------
    def end_row(self):
        return
        # Dead code (legacy text-file backend):
        self.in_cell = 0
        cell_text = [None]*self.ncols
        for cell in range(self.ncols):
            if self.cell_widths[cell]:
                blanks = ' '*self.cell_widths[cell] + '\n'
                if self.cell_lines[cell] < self.maxlines:
                    self.cellpars[cell] = self.cellpars[cell] \
                        + blanks * (self.maxlines-self.cell_lines[cell])
                cell_text[cell] = self.cellpars[cell].split('\n')
        for line in range(self.maxlines):
            for cell in range(self.ncols):
                if self.cell_widths[cell]:
                    self.f.write(cell_text[cell][line])
            self.f.write('\n')

    #--------------------------------------------------------------------
    #
    # Start a cell. Set the self.in_cell flag, increment the curren cell number.
    #
    #--------------------------------------------------------------------
    def start_cell(self,style_name,span=1):
        return
        # Dead code (legacy text-file backend):
        self.in_cell = 1
        self.cellnum = self.cellnum + span
        span -= 1
        while span:
            self.cell_widths[self.cellnum-span] = 0
            span -= 1

    #--------------------------------------------------------------------
    #
    # End a cell. Find out the number of lines in this cell, correct
    # the maximum number of lines if necessary.
    #
    #--------------------------------------------------------------------
    def end_cell(self):
        return
        # Dead code (legacy text-file backend):
        self.in_cell = 0
        self.cell_lines[self.cellnum] = self.cellpars[self.cellnum].count('\n')
        if self.cell_lines[self.cellnum] > self.maxlines:
            self.maxlines = self.cell_lines[self.cellnum]

    def add_media_object(self, name, align, w_cm, h_cm, alt=''):
        return
        # Dead code (legacy text-file backend):
        this_text = '(photo)'
        if self.in_cell:
            self.cellpars[self.cellnum] = self.cellpars[self.cellnum] + this_text
        else:
            self.f.write(this_text)

    def write_note(self,text,format,style_name):
        return
        # Dead code (legacy text-file backend):
        if format == 1:
            for line in text.split('\n'):
                self.start_paragraph(style_name)
                self.write_text(line)
                self.end_paragraph()
        elif format == 0:
            for line in text.split('\n\n'):
                self.start_paragraph(style_name)
                line = line.replace('\n',' ')
                line = ' '.join(line.split())
                self.write_text(line)
                self.end_paragraph()

    #--------------------------------------------------------------------
    #
    # Writes text.
    #--------------------------------------------------------------------
    def write_text(self,text,mark=None):
        # Append to the current paragraph (mark is ignored here).
        self.text = self.text + text
|
pmghalvorsen/gramps_branch
|
gramps/gui/plug/quick/_textbufdoc.py
|
Python
|
gpl-2.0
| 12,680
|
[
"Brian"
] |
c21b1779c5c232e52938d0fc027031cc27b92014d26d9015a1bd90312739fa3a
|
# -*- coding: utf-8 -*-
# @Date : Nov 22 2012
# @Author : Tachyon Technologies Pvt Ltd
# @Version : 1
from email.MIMEText import MIMEText
from aifc import Error
import cherrypy
from QuillSourceProcessor import QuillSourceProcessor
from QuillManual import QuillManual
import const
import MySQLdb
import logging
import logging.config
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMessage import MIMEMessage
from email.Header import Header
import email.Utils
import MimeWriter, mimetools, cStringIO
import htmllib, formatter
import time
import re, os, signal, sys
import quilljson as json
import demjson
import QuillPrimary
import config
import urllib2
from cherrypy.process.plugins import PIDFile
# Configure logging from the bundled config file; single shared app logger.
logging.config.fileConfig('logsystem.conf')
logger = logging.getLogger('quillpad')
class QuillCherry:
@cherrypy.expose
def getCorrections(self, lang, currWord, userInput, pos, rand=None, callback=None, scid=None):
currWord = currWord.split(",")
c = self.quillProcessor.getCorrections(lang,
[x.decode('utf-8') for x in currWord], userInput, int(pos))
o = {}
words = []
for i in c:
word = "".join(i)
o[word] = i
words.append(word)
ret = json.encode({'options': words, 'optmap': o})
if callback:
ret = "%s(%s,%s)" % (callback, ret, scid)
return ret
    @cherrypy.expose
    def getCorrectionsStr(self, lang, currWord, userInput, pos, rand=None, callback=None, scid=None):
        # Same contract as getCorrections, but takes the current word as a
        # single UTF-8 string rather than a comma-separated list.
        c = self.quillProcessor.getCorrectionsStr(lang, currWord.decode('utf-8'), userInput, int(pos))
        o = {}       # joined word -> original correction sequence
        words = []   # joined words, in processor order
        for i in c:
            word = "".join(i)
            o[word] = i
            words.append(word)
        ret = json.encode({'options': words, 'optmap': o})
        if callback:
            # JSONP: wrap the payload in the caller-supplied callback.
            ret = "%s(%s,%s)" % (callback, ret, scid)
        return ret
@cherrypy.expose
def processText(self, inString,rand,lang):
self.quillProcessor.switchLanguage(lang)
return inString + "\n" + const.optionSeperator.join(self.quillProcessor.processText(inString.lower(), True).split(const.optionSeperator)[:4])
    @cherrypy.expose
    def processWord(self, inString,lang,rand=None):
        # Transliterate a single word; serve from the preprocessed word list
        # when available, otherwise run the full processor.  Only the first
        # four options are returned.  (Python 2: dict.has_key)
        self.quillProcessor.switchLanguage(lang)
        if self.preprocs.has_key(lang) :
            if self.preprocs[lang].has_key(inString.lower()) :
                return inString + "\n" + const.optionSeperator.join(self.preprocs[lang][inString.lower()][:4])
        return inString + "\n" + const.optionSeperator.join(
            self.quillProcessor.processText(inString.lower()).split(const.optionSeperator)[:4])
@cherrypy.expose
def xlit(self, inString, lang, rand=None):
d = self.quillProcessor.xlit(inString, lang)
return json.encode(d)
@cherrypy.expose
def processString(self, inString, lang, rand=None):
return self.quillProcessor.processString(inString, lang)
    @cherrypy.expose
    def processWordJSON(self, inString, lang, rand=None, callback=None, scid=None):
        # JSON variant of processWord: returns the processor's result dict
        # plus an 'itrans' rendering, with any preprocessed word promoted to
        # the front of the first option list.  (Python 2 except syntax.)
        d = self.quillProcessor.processWord(inString, lang)
        try:
            d['itrans'] = self.quillPrimary[lang].primaryToUnicode(inString)
        except KeyError, e:
            # No primary rules for this language; echo the input.
            d['itrans'] = inString
        # Adding the preproc words
        if self.preprocs.has_key(lang) :
            if self.preprocs[lang].has_key(inString.lower()):
                preProcWord = self.preprocs[lang][inString.lower()][0]
                if preProcWord in d['twords'][0]['options']:
                    d['twords'][0]['options'].remove(preProcWord)
                d['twords'][0]['options'].insert(0, preProcWord)
        ret = json.encode(d)
        if callback:
            # JSONP wrapping.
            ret = "%s(%s,%s)" % (callback, ret, scid)
        return ret
    @cherrypy.expose
    def processEngWord(self, inString, lang, rand=None, callback=None, scid=None):
        # Like processWordJSON, but only the single top option is returned,
        # under the key 'engOutput'.  (Python 2 except syntax.)
        d = self.quillProcessor.processWord(inString, lang)
        try:
            d['itrans'] = self.quillPrimary[lang].primaryToUnicode(inString)
        except KeyError, e:
            d['itrans'] = inString
        # Adding the preproc words
        if self.preprocs.has_key(lang) :
            if self.preprocs[lang].has_key(inString.lower()):
                preProcWord = self.preprocs[lang][inString.lower()][0]
                if preProcWord in d['twords'][0]['options']:
                    d['twords'][0]['options'].remove(preProcWord)
                d['twords'][0]['options'].insert(0, preProcWord)
        ret = json.encode({'engOutput': d['twords'][0]['options'][0]})
        if callback:
            ret = "%s(%s,%s)" % (callback, ret, scid)
        return ret
    @cherrypy.expose
    def processAPIWord(self, inString, lang, rand=None, callback=None, scid=None, key=None):
        # API-key-gated version of processWordJSON.
        # NOTE(review): on a missing/invalid key this sets HTTP 403 and
        # implicitly returns None (empty response body).
        if key:
            if self.validAPIKeys.has_key(key) and lang in self.validAPIKeys.get(key):
                return self.processWordJSON(inString, lang, rand, callback, scid)
        cherrypy.response.status = 403
@cherrypy.expose
def AddAPIKey(self, key=None, lang=None):
if key and lang:
lang = lang.lower()
if key not in self.validAPIKeys:
self.validAPIKeys[key] = [lang]
elif lang not in self.validAPIKeys[key]:
self.validAPIKeys[key].append(lang)
return json.encode({'status': 'Success'})
return json.encode({'status': 'Error'})
@cherrypy.expose
def RemoveAPIKey(self, key=None):
if key:
if key in self.validAPIKeys:
del self.validAPIKeys[key]
return json.encode({'status': 'Success'})
return json.encode({'status': 'Error'})
@cherrypy.expose
def GetAPILang(self, key=None, callback=None, id=None):
s='Error'
if key:
if key in self.validAPIKeys:
langs = self.validAPIKeys[key]
s = ','.join(langs);
if callback:
s = "%s('%s',%s)" % (callback, s, id)
return s
@cherrypy.expose
def RemoveLanguage(self, key=None, lang=None):
if key and lang:
if key in self.validAPIKeys and lang in self.validAPIKeys.get(key):
self.validAPIKeys.get(key).remove(lang)
if len(self.validAPIKeys.get(key)) == 0:
del self.validAPIKeys[key]
return json.encode({'status': 'Success'})
return json.encode({'status': 'Error'})
    @cherrypy.expose
    def reverseProcessWord(self, lang, uWord, rand=None, callback=None, scid=None):
        #Added for Accenture
        # Reverse transliteration: Unicode word -> roman options, as JSON.
        # (Python 2 except syntax.)
        d = {}
        try:
            d['options'] = self.quillProcessor.processReverseWord(uWord.decode('utf-8'), lang)
        except KeyError, e:
            # Unknown language: return an empty option list.
            d['options'] = []
            print "Exception: ", e
        ret = json.encode(d)
        if callback:
            ret = "%s(%s, %s)" % (callback, ret, scid)
        return ret
    @cherrypy.expose
    def reverseProcessWordJSON(self, lang, uStr1, uStr2, rand=None, callback=None, scid=None):
        # Convert two Unicode strings to roman "primary" notation via the
        # per-language manual transliterator; on unknown language the inputs
        # are echoed back unchanged.  (Python 2 except syntax.)
        d = {}
        try:
            d['engStr1'] = self.quillManual[lang].unicodeToPrimary(uStr1.decode('utf-8'))
            d['engStr2'] = self.quillManual[lang].unicodeToPrimary(uStr2.decode('utf-8'))
        except KeyError, e:
            d['engStr1'] = uStr1
            d['engStr2'] = uStr2
            print "Exception: ", e
        ret = json.encode(d)
        if callback:
            ret = "%s(%s, %s)" % (callback, ret, scid)
        return ret
    @cherrypy.expose
    def primaryToUnicodeCherry(self, literal ,rand, lang) :
        # Convert roman "primary" notation to Unicode; response format is
        # "<input>\n<first option>".  A dashed line signals failure.
        try:
            return literal + "\n" + self.quillManual[lang].primaryToUnicode( literal )[0]
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def unicodeToPrimaryCherry(self, uStr ,rand, lang) :
        # Inverse of primaryToUnicodeCherry: Unicode -> roman primary string.
        # Response format "<input>\n<result>"; dashed line on failure.
        try:
            return uStr.decode('utf-8') + "\n" + self.quillManual[lang].unicodeToPrimary( uStr.decode('utf-8') )
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def unicodeToHelperPairCherry(self, uStr, rand, lang):
        # Return the helper pair for a Unicode string as "<second>\n<first>".
        # Dashed line on failure.
        try:
            tuplePair = self.quillManual[lang].unicodeToHelperPair( uStr.decode('utf-8') )
            return tuplePair[1] + "\n" + tuplePair[0]
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def unicodeToHelperStrCherry(self, uStr,rand, lang ) :
        # Return the helper string for a Unicode string as "<input>\n<helper>".
        # Dashed line on failure.
        try:
            return uStr.decode('utf-8') + "\n" + self.quillManual[lang].unicodeToHelperStr( uStr.decode('utf-8') )
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def getOptionsAtCherry(self, currHelper, currUStr, pos,rand, lang ) :
        # Alternative characters at a cursor position, newline-separated.
        # NOTE(review): tupleToStrList is presumably defined elsewhere in
        # this module — not visible here.
        try:
            return currUStr.decode('utf-8') + "\n" + "\n".join(tupleToStrList(self.quillManual[lang].getOptionsAt( currHelper, currUStr.decode('utf-8'), int(pos) )))
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def getInsertCorrectionsCherry(self, currHelper, currUStr, pos, delta,rand, lang ) :
        # Corrections after inserting `delta` at `pos`, newline-separated.
        try:
            corrections = currUStr.decode('utf-8') + "\n" + "\n".join(tupleToStrList(self.quillManual[lang].getInsertCorrections( currHelper, currUStr.decode('utf-8'), int(pos), delta )));
            return corrections
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def getDeleteCorrectionsCherry(self, currHelper, currUStr, pos, delLen,rand, lang ) :
        # Corrections after deleting `delLen` characters at `pos`.
        try:
            return currUStr.decode('utf-8') + "\n" + "\n".join(tupleToStrList(self.quillManual[lang].getDeleteCorrections( currHelper, currUStr.decode('utf-8'), int(pos), int(delLen) )))
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    @cherrypy.expose
    def getDeleteAndInsertCorrectionsCherry(self, currHelper, currUStr, pos, delLen,rand, lang, insertDelta ) :
        # Combined edit: delete `delLen` characters at `pos`, then insert
        # `insertDelta`; corrections are recomputed after the deletion.
        try:
            pos = int(pos)
            delLen = int(delLen)
            deleteCorrectionsTuple = tupleToStrList(self.quillManual[lang].getDeleteCorrections( currHelper, currUStr.decode('utf-8'), pos, delLen ))
            # Helper string with the deleted span removed.
            newHelper = currHelper[0:pos] + currHelper[pos+delLen:]
            print "newHelper : " + newHelper
            newCurrUStr = deleteCorrectionsTuple[0];
            return currUStr.decode('utf-8') + "\n" + "\n".join(tupleToStrList(self.quillManual[lang].getInsertCorrections( newHelper, newCurrUStr, int(pos), insertDelta )));
        except Exception, e:
            logger.error(str(e))
            print e
            return "-------------";
    def loadPreprocs(self,preprocsListFile) :
        # Load the per-language preprocessed word lists.  The index file has
        # one "<lang> <path>" entry per line (first line skipped as a header);
        # each word file has "<word> <option>..." lines (header skipped too).
        self.preprocs = {}
        lines = open(preprocsListFile).readlines()
        for line in lines[1:] :
            (lang,file) = line.split()
            self.preprocs[lang] = {}
            preprocLines = open(file,'rb').readlines()
            for preprocLine in preprocLines[1:] :
                l = preprocLine.strip().decode('utf-8').split()
                # Key is the lowercased word; value is its option list.
                self.preprocs[lang][l[0].lower()] = l[1:]
    def buildPrimary(self):
        # Build one rule-based primary transliterator per configured language.
        # (Python 2 print statements; trailing comma suppresses the newline.)
        self.quillPrimary = {}
        for lang in config.langMap:
            print "Loading primary rules for", lang, "...",
            self.quillPrimary[lang] = QuillPrimary.QuillRuleBased(config.langMap[lang][0])
            print "done"
    def __init__(self):
        # Wire up the source processor, the manual transliterators for six
        # languages, the preprocessed word lists and the primary rules.
        self.processWordDict = {}
        self.validAPIKeys = {}  # API key -> list of enabled languages
        self.quillProcessor = QuillSourceProcessor()
        self.quillManual= {"hindi": QuillManual("Hindi_Primary.xml"),"kannada": QuillManual("Kannada_Primary.xml"), "malayalam": QuillManual("Malayalam_Primary.xml"), "marathi": QuillManual("Marathi_Primary.xml"), "tamil": QuillManual("Tamil_Primary.xml"),"telugu": QuillManual("Telugu_Primary.xml")}
        self.loadPreprocs("preProcessedWordFiles.txt")
        self.buildPrimary()
    @cherrypy.expose
    def saveErrorMessage(self, message, version, sessionid, rand, lang):
        # Persist a client-reported error into the error_log table.
        # NOTE(review): insertFromDict and connect are presumably defined
        # elsewhere in this module — not visible here.
        insert_dict = {'message':message, 'version':version, 'sessionid':sessionid, 'language':lang}
        sql = insertFromDict("error_log", insert_dict)
        try:
            cursor = cherrypy.thread_data.db.cursor()
        except Exception, e:
            # Connection likely dropped; reconnect once and retry.
            connect()
            cursor = cherrypy.thread_data.db.cursor()
            logger.warn(str(e))
        cursor.execute(sql, insert_dict)
        cursor.close()
    @cherrypy.expose
    def saveFeedback(self, message, name, email, version, sessionid, rand, lang):
        # Persist user feedback, including the (truncated) forwarded client
        # address, into the feedback table.
        insert_dict = {'message':message,'name':name,'email':email, 'version':version, 'sessionid':sessionid, 'language':lang, "remote_addr": cherrypy.request.headers['x-forwarded-for'][-90:]}
        sql = insertFromDict("feedback", insert_dict)
        try:
            cursor = cherrypy.thread_data.db.cursor()
        except Exception, e:
            # Connection likely dropped; reconnect once and retry.
            connect()
            cursor = cherrypy.thread_data.db.cursor()
            logger.warn(str(e))
        cursor.execute(sql, insert_dict)
        cursor.close()
def sendMail(self, email_to, email_from, email_replyto, email_subject, html_message, lang):
    """Send a pre-built MIME message via SMTP and log the send in the db.

    `email_to` is a comma-separated recipient list; the reply-to address is
    also added as a recipient. Returns "success" or "failed".
    """
    try:
        #msg = ("From: %s\r\nTo: %s\r\nReply-To: %s\r\n\r\n" % (email_from + "<quill@tachyon.in>", ", ".join(email_to.split()), email_replyto))
        server = smtplib.SMTP(const.SMTP_SERVER_URL)
        if len(const.SMTP_LOGIN_USER) != 0:
            server.login( const.SMTP_LOGIN_USER, const.SMTP_LOGIN_PASSWD)
        #server.set_debuglevel(1)
        send_emails_to = email_to.split(',');
        send_emails_to.append(email_replyto);
        server.sendmail(email_from + "<quill@tachyon.in>", send_emails_to, html_message)
        server.quit()
        #recording this in the db
        insert_dict = {'lang':lang, 'mail_count':len(email_to.split(','))}
        sql = insertFromDict("emails_sent", insert_dict)
        try:
            cursor = cherrypy.thread_data.db.cursor()
        except Exception, e:
            # The thread's connection may be stale; reconnect once and retry.
            connect()
            cursor = cherrypy.thread_data.db.cursor()
            logger.warn(str(e))
        cursor.execute(sql, insert_dict)
        cursor.close()
        return "success"
    except Exception, e:
        print e
        logger.error(str(e))
        return "failed"
@cherrypy.expose
def sendHTMLEmail(self, email_to, email_from, email_replyto, email_subject, email_message, email_message_html, rand, lang):
    """Build a multipart (plain text + formatted HTML) mail and send it."""
    html_message = createhtmlmail(email_subject, email_message,
        getFormattedHTML(email_message_html, lang), email_from + " <quill@tachyon.in>", email_to, email_replyto)
    self.sendMail(email_to, email_from, email_replyto, email_subject, html_message, lang)
@cherrypy.expose
def sendEmail(self, email_to, email_from, email_replyto, email_subject, email_message, version, sessionid, rand, lang):
    """Send a plain-text message as a multipart mail.

    The HTML alternative is generated from the message, and the plain-text
    alternative is recovered by stripping that HTML back out with htmllib.
    """
    html = getHTML(email_message, lang)
    textout = cStringIO.StringIO( )
    formtext = formatter.AbstractFormatter(formatter.DumbWriter(textout))
    parser = htmllib.HTMLParser(formtext)
    parser.feed(html)
    parser.close( )
    text = textout.getvalue( )
    html_message = createhtmlmail(email_subject, text, html, email_from + " <quill@tachyon.in>", email_to, email_replyto)
    self.sendMail(email_to, email_from, email_replyto, email_subject, html_message, lang)
@cherrypy.expose
def saveNewWordMapping(self, rand, lang, key, value, mode):
    """Record a user-supplied word -> transliteration mapping in new_words."""
    insert_dict = {'wkey':key, 'wvalue':value, 'language':lang, 'mode':mode}
    sql = insertFromDict("new_words", insert_dict)
    try:
        cursor = cherrypy.thread_data.db.cursor()
    except Exception, e:
        # The thread's connection may be stale; reconnect once and retry.
        connect()
        cursor = cherrypy.thread_data.db.cursor()
        logger.warn(str(e))
    cursor.execute(sql, insert_dict)
    cursor.close()
def getHTML(message, lang=None, withNote=True):
    """Wrap *message* (preformatted text) in a minimal UTF-8 HTML page.

    When withNote is true, append an encoding/reply hint naming *lang*.
    """
    chunks = [
        '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"><html><head><xmeta content="text/html;charset=utf-8" http-equiv="Content-Type"></head><xbody bgcolor="#ffffff" text="#000000"><pre>',
        message,
        '</pre>',
    ]
    if withNote:
        chunks.append('<br><p>If you are seeing junk characters instead of the correct ' + lang +' characters, in your browser go to \'View->Encoding\' and select the option \'Unicode (UTF-8)\'. To respond to this email in '+ lang +', visit http://quillpad.in/'+lang+'</p>')
    chunks.append('</xbody></html>')
    return ''.join(chunks)
def getFormattedHTML(message, lang):
    """Wrap already-formatted HTML *message* in a UTF-8 page, always with
    the encoding/reply hint that names *lang*."""
    parts = []
    parts.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"><html><head><xmeta content="text/html;charset=utf-8" http-equiv="Content-Type"></head><xbody>')
    parts.append(message)
    parts.append('<br><p>If you are seeing junk characters instead of the correct ' + lang +' characters, in your browser go to \'View->Encoding\' and select the option \'Unicode (UTF-8)\'. To respond to this email in '+ lang +', visit http://quillpad.in/'+lang+'</p>')
    parts.append('</xbody></html>')
    return ''.join(parts)
def createhtmlmail(subject, text, html, email_from, email_to, email_replyto):
    """Create a multipart/alternative MIME message that renders as HTML or
    plain text, as the receiving MUA prefers.

    Uses the legacy Python 2 MimeWriter/mimetools modules; both parts are
    sent as UTF-8 with 8bit transfer encoding. Returns the full message
    (headers + body) as a string.
    """
    out = cStringIO.StringIO( ) # output buffer for our message
    htmlin = cStringIO.StringIO(html) # input buffer for the HTML
    txtin = cStringIO.StringIO(text) # input buffer for the plain text
    writer = MimeWriter.MimeWriter(out)
    # Set up some basic headers. Place subject here because smtplib.sendmail
    # expects it to be in the message, as relevant RFCs prescribe.
    writer.addheader("Subject", subject)
    writer.addheader("To", email_to)
    writer.addheader("MIME-Version", "1.0")
    writer.addheader("From", email_from)
    writer.addheader("Reply-To", email_replyto)
    writer.addheader("Cc", email_replyto)
    writer.addheader("Date", email.Utils.formatdate(localtime=1))
    writer.addheader("Message-ID", email.Utils.make_msgid())
    # Start the multipart section of the message. Multipart/alternative seems
    # to work better on some MUAs than multipart/mixed.
    writer.startmultipartbody("alternative")
    writer.flushheaders( )
    # the plain-text section: just copied through, assuming iso-8859-1
    subpart = writer.nextpart( )
    #pout = subpart.startbody("text/plain", [("charset", 'iso-8859-1')])
    pout = subpart.startbody("text/plain", [("charset", 'utf-8')])
    pout.write(txtin.read( ))
    txtin.close( )
    # the HTML subpart of the message: quoted-printable, just in case
    subpart = writer.nextpart( )
    #subpart.addheader("Content-Transfer-Encoding", "quoted-printable")
    subpart.addheader("Content-Transfer-Encoding", "8bit")
    #pout = subpart.startbody("text/html", [("charset", 'us-ascii')])
    pout = subpart.startbody("text/html", [("charset", 'utf-8')])
    #mimetools.encode(htmlin, pout, 'quoted-printable')
    mimetools.encode(htmlin, pout, '8bit')
    htmlin.close( )
    # You're done; close your writer and return the message as a string
    writer.lastpart( )
    msg = out.getvalue( )
    out.close( )
    return msg
def insertFromDict(table, dict):
    """Build a parameterized INSERT statement for *table* from the keys of
    *dict*.

    Values are rendered as pyformat placeholders ("%(key)s") so the result
    can be passed to cursor.execute(sql, dict).
    """
    columns = ', '.join(dict)
    placeholders = ', '.join('%(' + str(key) + ')s' for key in dict)
    return 'INSERT INTO ' + table + ' (' + columns + ') VALUES (' + placeholders + ');'
def dictValuePad(key):
    """Return the pyformat placeholder ("%(key)s") for a column name."""
    return '%%(%s)s' % (key,)
def connect(thread_index=None):
    """Create a MySQL connection and store it on the current CherryPy thread.

    NOTE(review): credentials are hard-coded; consider moving to config.
    """
    # Create a connection and store it in the current thread
    try:
        cherrypy.thread_data.db= MySQLdb.connect(host='127.0.0.1', user='quill', passwd='quill', db='quill', charset='utf8' )
    except Exception,e:
        print e
        logger.error(str(e))
# Tell CherryPy to call "connect" for each thread, when it starts up.
# Fixed: the original line read `cherrypy.engine.subscribe.('start_thread', ...)`,
# which is a syntax error (stray dot before the call parentheses).
cherrypy.engine.subscribe('start_thread', connect)
def tupleToStrList(ustrList):
    """Return the first item of every pair in the payload (element 1) of
    *ustrList*, which is expected to be (metadata, [(str, ...), ...])."""
    result = []
    for entry in ustrList[1]:
        result.append(entry[0])
    return result
def main() :
    # The triple-quoted block below is an older startup sequence kept for
    # reference; as a bare string expression it has no runtime effect.
    """
    cherrypy.root = QuillCherry()
    cherrypy.root.quillpad_backend = cherrypy.root
    cherrypy.config.update( file='quill_cherry8088.conf')
    cherrypy.config.update({'thread_pool': 10})
    cherrypy.server.start()
    """
    # NOTE(review): the Config(...) result is discarded here; presumably the
    # conf file takes effect through another path -- confirm.
    cherrypy._cpconfig.Config('quill_cherry8088.conf')
    quillCherry = QuillCherry()
    cherrypy.quickstart(quillCherry)
# Start the CherryPy server when invoked as a script.
if __name__ == '__main__' :
    main()
|
teamtachyon/Quillpad-Server
|
startquill_cherry.py
|
Python
|
bsd-3-clause
| 21,836
|
[
"VisIt"
] |
b25d61154f967362b02088990bd91e64a9ccf9499011741feb98f231e86df3e6
|
"""
langcodes knows what languages are. It knows the standardized codes that
refer to them, such as `en` for English, `es` for Spanish and `hi` for Hindi.
Often, it knows what these languages are called *in* a language, and that
language doesn't have to be English.
See README.md for the main documentation, or read it on GitHub at
https://github.com/LuminosoInsight/langcodes/ . For more specific documentation
on the functions in langcodes, scroll down and read the docstrings.
Some of these functions, particularly those that work with the names of
languages, require the `language_data` module to be installed.
"""
from operator import itemgetter
from typing import Any, List, Tuple, Dict, Sequence, Iterable, Optional, Mapping, Union
import warnings
import sys
from langcodes.tag_parser import parse_tag, normalize_characters
from langcodes.language_distance import tuple_distance_cached
from langcodes.data_dicts import (
DEFAULT_SCRIPTS,
LANGUAGE_REPLACEMENTS,
LANGUAGE_ALPHA3,
LANGUAGE_ALPHA3_BIBLIOGRAPHIC,
SCRIPT_REPLACEMENTS,
TERRITORY_REPLACEMENTS,
NORMALIZED_MACROLANGUAGES,
LIKELY_SUBTAGS,
VALIDITY,
)
# When we're getting natural language information *about* languages, it's in
# English if you don't specify the language.
DEFAULT_LANGUAGE = 'en'
# Shown (and the ImportError re-raised) when the optional `language_data`
# dependency is needed but not installed.
LANGUAGE_NAME_IMPORT_MESSAGE = """
Looking up language names now requires the `language_data` package.
Install it with:
pip install language_data
Or as an optional feature of langcodes:
pip install langcodes[data]
"""
class Language:
"""
The Language class defines the results of parsing a language tag.
Language objects have the following attributes, any of which may be
unspecified (in which case their value is None):
- *language*: the code for the language itself.
- *script*: the 4-letter code for the writing system being used.
- *territory*: the 2-letter or 3-digit code for the country or similar territory
of the world whose usage of the language appears in this text.
- *extlangs*: a list of more specific language codes that follow the language
code. (This is allowed by the language code syntax, but deprecated.)
- *variants*: codes for specific variations of language usage that aren't
covered by the *script* or *territory* codes.
- *extensions*: information that's attached to the language code for use in
some specific system, such as Unicode collation orders.
- *private*: a code starting with `x-` that has no defined meaning.
The `Language.get` method converts a string to a Language instance.
It's also available at the top level of this module as the `get` function.
"""
# The subtag attributes that make up a parsed language tag.
ATTRIBUTES = [
    'language',
    'extlangs',
    'script',
    'territory',
    'variants',
    'extensions',
    'private',
]
# When looking up "likely subtags" data, we try looking up the data for
# increasingly less specific versions of the language code.
BROADER_KEYSETS = [
    {'language', 'script', 'territory'},
    {'language', 'territory'},
    {'language', 'script'},
    {'language'},
    {'script'},
    {},
]
# Attribute subsets that are meaningful targets for language matching.
MATCHABLE_KEYSETS = [
    {'language', 'script', 'territory'},
    {'language', 'script'},
    {'language'},
]
# Values cached at the class level
_INSTANCES: Dict[tuple, 'Language'] = {}  # interned instances, keyed by attribute tuple
_PARSE_CACHE: Dict[Tuple[str, bool], 'Language'] = {}  # get() results, keyed by (tag, normalize)
def __init__(
    self,
    language: Optional[str] = None,
    extlangs: Optional[Sequence[str]] = None,
    script: Optional[str] = None,
    territory: Optional[str] = None,
    variants: Optional[Sequence[str]] = None,
    extensions: Optional[Sequence[str]] = None,
    private: Optional[str] = None,
):
    """
    The constructor for Language objects.

    It's inefficient to call this directly, because it can't return
    an existing instance. Instead, call Language.make(), which
    has the same signature.
    """
    self.language = language
    self.extlangs = extlangs
    self.script = script
    self.territory = territory
    self.variants = variants
    self.extensions = extensions
    self.private = private
    # Cached values, filled in lazily by the corresponding methods.
    # (Annotations corrected to Optional: all slots start as None.)
    self._simplified: Optional['Language'] = None
    self._searchable: Optional['Language'] = None
    self._broader: Optional[List[str]] = None
    self._assumed: Optional['Language'] = None
    self._filled: Optional['Language'] = None
    self._macrolanguage: Optional['Language'] = None
    self._str_tag: Optional[str] = None
    self._dict: Optional[dict] = None
    self._disp_separator: Optional[str] = None
    self._disp_pattern: Optional[str] = None
    # Make sure the str_tag value is cached
    self.to_tag()
@classmethod
def make(
    cls,
    language: Optional[str] = None,
    extlangs: Optional[Sequence[str]] = None,
    script: Optional[str] = None,
    territory: Optional[str] = None,
    variants: Optional[Sequence[str]] = None,
    extensions: Optional[Sequence[str]] = None,
    private: Optional[str] = None,
) -> 'Language':
    """
    Create a Language object from any subset of its attributes, interning
    the result: if an equivalent value was made before, that same instance
    is returned.
    """
    # Sequences are frozen into tuples so the key is hashable.
    key = (
        language,
        tuple(extlangs or ()),
        script,
        territory,
        tuple(variants or ()),
        tuple(extensions or ()),
        private,
    )
    try:
        return cls._INSTANCES[key]
    except KeyError:
        pass
    instance = cls(
        language=language,
        extlangs=extlangs,
        script=script,
        territory=territory,
        variants=variants,
        extensions=extensions,
        private=private,
    )
    cls._INSTANCES[key] = instance
    return instance
@staticmethod
def get(tag: Union[str, 'Language'], normalize=True) -> 'Language':
    """
    Create a Language object from a language tag string.

    If normalize=True, non-standard or overlong tags will be replaced as
    they're interpreted. This is recommended.

    Here are several examples of language codes, which are also test cases.
    Most language codes are straightforward, but these examples will get
    pretty obscure toward the end.

    >>> Language.get('en-US')
    Language.make(language='en', territory='US')
    >>> Language.get('zh-Hant')
    Language.make(language='zh', script='Hant')
    >>> Language.get('und')
    Language.make()

    This function is idempotent, in case you already have a Language object:

    >>> Language.get(Language.get('en-us'))
    Language.make(language='en', territory='US')

    The non-code 'root' is sometimes used to represent the lack of any
    language information, similar to 'und'.

    >>> Language.get('root')
    Language.make()

    By default, getting a Language object will automatically convert
    deprecated tags:

    >>> Language.get('iw')
    Language.make(language='he')
    >>> Language.get('in')
    Language.make(language='id')
    >>> Language.get('zh-HK')
    Language.make(language='zh', script='Hant', territory='HK')
    >>> Language.get('zh_HK')
    Language.make(language='zh', script='Hant', territory='HK')

    One type of deprecated tag that should be replaced is for sign
    languages, which used to all be coded as regional variants of a
    fictitious global sign language called 'sgn'. Of course, there is no
    global sign language, so sign languages now have their own language
    codes.

    >>> Language.get('sgn-US')
    Language.make(language='ase')
    >>> Language.get('sgn-US', normalize=False)
    Language.make(language='sgn', territory='US')

    'en-gb-oed' is a tag that's grandfathered into the standard because it
    has been used to mean "spell-check this with Oxford English Dictionary
    spelling", but that tag has the wrong shape. We interpret this as the
    new standardized tag 'en-gb-oxendict', unless asked not to normalize.

    >>> Language.get('en-gb-oed')
    Language.make(language='en', territory='GB', variants=['oxendict'])
    >>> Language.get('en-gb-oed', normalize=False)
    Language.make(language='en-gb-oed')

    'zh-min-nan' is another oddly-formed tag, used to represent the
    Southern Min language, which includes Taiwanese as a regional form. It
    now has its own language code.

    >>> Language.get('zh-min-nan')
    Language.make(language='nan')

    The vague tag 'zh-min' is now also interpreted as 'nan', with a private
    extension indicating that it had a different form:

    >>> Language.get('zh-min')
    Language.make(language='nan', private='x-zh-min')

    Occasionally Wiktionary will use 'extlang' tags in strange ways, such
    as using the tag 'und-ibe' for some unspecified Iberian language.

    >>> Language.get('und-ibe')
    Language.make(extlangs=['ibe'])

    Here's an example of replacing multiple deprecated tags.

    The language tag 'sh' (Serbo-Croatian) ended up being politically
    problematic, and different standards took different steps to address
    this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
    and 'bs'. Unicode further decided that it's a legacy tag that should
    be interpreted as 'sr-Latn', which the language matching rules say
    is mutually intelligible with all those languages.

    We complicate the example by adding on the territory tag 'QU', an old
    provisional tag for the European Union, which is now standardized as
    'EU'.

    >>> Language.get('sh-QU')
    Language.make(language='sr', script='Latn', territory='EU')
    """
    if isinstance(tag, Language):
        if not normalize:
            # shortcut: we have the tag already
            return tag
        # We might need to normalize this tag. Convert it back into a
        # string tag, to cover all the edge cases of normalization in a
        # way that we've already solved.
        tag = tag.to_tag()
    if (tag, normalize) in Language._PARSE_CACHE:
        return Language._PARSE_CACHE[tag, normalize]
    data: Dict[str, Any] = {}
    # If the complete tag appears as something to normalize, do the
    # normalization right away. Smash case and convert underscores to
    # hyphens when checking, because the case normalization that comes from
    # parse_tag() hasn't been applied yet.
    tag_lower = normalize_characters(tag)
    if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
        tag = LANGUAGE_REPLACEMENTS[tag_lower]
    # parse_tag yields (type, value) pairs; accumulate them into `data`,
    # applying per-subtag replacements as we go.
    components = parse_tag(tag)
    for typ, value in components:
        if typ == 'extlang' and normalize and 'language' in data:
            # smash extlangs when possible
            minitag = f"{data['language']}-{value}"
            norm = LANGUAGE_REPLACEMENTS.get(normalize_characters(minitag))
            if norm is not None:
                data.update(Language.get(norm, normalize).to_dict())
            else:
                data.setdefault('extlangs', []).append(value)
        elif typ in {'extlang', 'variant', 'extension'}:
            # List-valued attributes are stored under pluralized keys.
            data.setdefault(typ + 's', []).append(value)
        elif typ == 'language':
            if value == 'und':
                pass
            elif normalize:
                replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
                if replacement is not None:
                    # parse the replacement if necessary -- this helps with
                    # Serbian and Moldovan
                    data.update(Language.get(replacement, normalize).to_dict())
                else:
                    data['language'] = value
            else:
                data['language'] = value
        elif typ == 'territory':
            if normalize:
                data['territory'] = TERRITORY_REPLACEMENTS.get(value.lower(), value)
            else:
                data['territory'] = value
        elif typ == 'grandfathered':
            # If we got here, we got a grandfathered tag but we were asked
            # not to normalize it, or the CLDR data doesn't know how to
            # normalize it. The best we can do is set the entire tag as the
            # language.
            data['language'] = value
        else:
            data[typ] = value
    result = Language.make(**data)
    Language._PARSE_CACHE[tag, normalize] = result
    return result
def to_tag(self) -> str:
    """
    Convert a Language back to a standard language tag, as a string.
    This is also the str() representation of a Language object.

    >>> Language.make(language='en', territory='GB').to_tag()
    'en-GB'
    >>> Language.make(language='yue', script='Hant', territory='HK').to_tag()
    'yue-Hant-HK'
    >>> Language.make(script='Arab').to_tag()
    'und-Arab'
    >>> str(Language.make(territory='IN'))
    'und-IN'
    """
    if self._str_tag is None:
        # Subtag order: language, extlangs (sorted), script, territory,
        # variants (sorted), extensions (original order), private use.
        pieces = [self.language if self.language else 'und']
        pieces.extend(sorted(self.extlangs or ()))
        if self.script:
            pieces.append(self.script)
        if self.territory:
            pieces.append(self.territory)
        pieces.extend(sorted(self.variants or ()))
        pieces.extend(self.extensions or ())
        if self.private:
            pieces.append(self.private)
        self._str_tag = '-'.join(pieces)
    return self._str_tag
def simplify_script(self) -> 'Language':
    """
    Remove the script subtag when it is the default script for the
    language, making it redundant.

    >>> Language.make(language='en', script='Latn').simplify_script()
    Language.make(language='en')
    >>> Language.make(language='yi', script='Latn').simplify_script()
    Language.make(language='yi', script='Latn')
    >>> Language.make(language='yi', script='Hebr').simplify_script()
    Language.make(language='yi')
    """
    if self._simplified is None:
        redundant = (
            self.language
            and self.script
            and DEFAULT_SCRIPTS.get(self.language) == self.script
        )
        self._simplified = self.update_dict({'script': None}) if redundant else self
    return self._simplified
def assume_script(self) -> 'Language':
    """
    Fill in the script if it's missing and it can be assumed from the
    language subtag; the inverse of `simplify_script`.

    >>> Language.make(language='en').assume_script()
    Language.make(language='en', script='Latn')
    >>> Language.make(language='yi').assume_script()
    Language.make(language='yi', script='Hebr')
    >>> Language.make(language='yi', script='Latn').assume_script()
    Language.make(language='yi', script='Latn')

    Nothing is filled in when the script can't be assumed -- such as when
    the language has multiple scripts, or no standard orthography:

    >>> Language.make(language='sr').assume_script()
    Language.make(language='sr')
    >>> Language.make(language='eee').assume_script()
    Language.make(language='eee')

    It also doesn't fill anything in when the language is unspecified.

    >>> Language.make(territory='US').assume_script()
    Language.make(territory='US')
    """
    if self._assumed is None:
        result = self
        if self.language and not self.script:
            try:
                result = self.update_dict(
                    {'script': DEFAULT_SCRIPTS[self.language]}
                )
            except KeyError:
                # No known default script for this language; leave as-is.
                pass
        self._assumed = result
    return self._assumed
def prefer_macrolanguage(self) -> 'Language':
    """
    Replace the language subtag with its macrolanguage code when this
    language is the dominant standardized language within that
    macrolanguage, following Unicode CLDR practice (e.g. Mandarin Chinese
    is 'zh', not 'cmn'; Malay is 'ms', not 'zsm'). Non-dominant languages
    that have macrolanguages are left alone.

    >>> Language.get('arb').prefer_macrolanguage()
    Language.make(language='ar')
    >>> Language.get('cmn-Hant').prefer_macrolanguage()
    Language.make(language='zh', script='Hant')
    >>> Language.get('yue-Hant').prefer_macrolanguage()
    Language.make(language='yue', script='Hant')
    """
    if self._macrolanguage is None:
        code = self.language or 'und'
        if code in NORMALIZED_MACROLANGUAGES:
            self._macrolanguage = self.update_dict(
                {'language': NORMALIZED_MACROLANGUAGES[code]}
            )
        else:
            self._macrolanguage = self
    return self._macrolanguage
def to_alpha3(self, variant: str = 'T') -> str:
    """
    Get the three-letter ISO 639-2 code for this language, even if it's
    canonically written with a two-letter code. Always returns a 3-letter
    string, or raises LookupError when no alpha3 code is known.

    We default to the 'terminology' code; pass `variant='B'` for the
    'bibliographic' code (e.g. German is 'deu' vs. 'ger'). Every language
    with two different alpha3 codes also has a preferred alpha2 code.

    >>> Language.get('fr').to_alpha3()
    'fra'
    >>> Language.get('fr-CA').to_alpha3()
    'fra'
    >>> Language.get('fr').to_alpha3(variant='B')
    'fre'
    >>> Language.get('de').to_alpha3(variant='T')
    'deu'
    >>> Language.get('ja').to_alpha3()
    'jpn'
    >>> Language.get('un').to_alpha3()
    Traceback (most recent call last):
    ...
    LookupError: 'un' is not a known language code, and has no alpha3 code.

    All valid two-letter language codes have corresponding alpha3 codes,
    even the un-normalized ones. If they were assigned an alpha3 code by ISO
    before they were assigned a normalized code by CLDR, these codes may be
    different:

    >>> Language.get('tl', normalize=False).to_alpha3()
    'tgl'
    >>> Language.get('tl').to_alpha3()
    'fil'
    >>> Language.get('sh', normalize=False).to_alpha3()
    'hbs'

    Three-letter codes are preserved, even if they're unknown:

    >>> Language.get('qqq').to_alpha3()
    'qqq'
    >>> Language.get('und').to_alpha3()
    'und'
    """
    variant = variant.upper()
    # (Substring check, as in the original: matches 'B', 'T', and ''.)
    if variant not in 'BT':
        raise ValueError("Variant must be 'B' or 'T'")
    code = self.language
    if code is None:
        return 'und'
    if len(code) == 3:
        return code
    if variant == 'B':
        bibliographic = LANGUAGE_ALPHA3_BIBLIOGRAPHIC.get(code)
        if bibliographic is not None:
            return bibliographic
    terminology = LANGUAGE_ALPHA3.get(code)
    if terminology is not None:
        return terminology
    raise LookupError(
        "{!r} is not a known language code, and has no alpha3 code.".format(
            code
        )
    )
def broader_tags(self) -> List[str]:
    """
    Iterate through increasingly general tags for this language.

    This isn't actually that useful for matching two arbitrary language tags
    against each other, but it is useful for matching them against a known
    standardized form, such as in the CLDR data.

    The list of broader versions to try appears in UTR 35, section 4.3,
    "Likely Subtags".

    >>> Language.get('nn-Latn-NO-x-thingy').broader_tags()
    ['nn-Latn-NO-x-thingy', 'nn-Latn-NO', 'nn-NO', 'nn-Latn', 'nn', 'und-Latn', 'und']
    >>> Language.get('arb-Arab').broader_tags()
    ['arb-Arab', 'ar-Arab', 'arb', 'ar', 'und-Arab', 'und']
    """
    if self._broader is not None:
        return self._broader
    # Start with the full tag, then append progressively broader variants
    # in BROADER_KEYSETS order, deduplicating. Order matters to callers
    # (e.g. maximize() takes the first hit).
    self._broader = [self.to_tag()]
    seen = set([self.to_tag()])
    for keyset in self.BROADER_KEYSETS:
        # Consider both this language and its preferred macrolanguage form.
        for start_language in (self, self.prefer_macrolanguage()):
            filtered = start_language._filter_attributes(keyset)
            tag = filtered.to_tag()
            if tag not in seen:
                self._broader.append(tag)
                seen.add(tag)
    return self._broader
def broaden(self) -> 'List[Language]':
    """
    Like `broader_tags`, but returns Language objects instead of strings.
    """
    return [Language.get(tag) for tag in self.broader_tags()]
def maximize(self) -> 'Language':
    """
    The Unicode CLDR contains a "likelySubtags" data file, which can guess
    reasonable values for fields that are missing from a language tag.

    This is particularly useful for comparing, for example, "zh-Hant" and
    "zh-TW", two common language tags that say approximately the same thing
    via rather different information. (Using traditional Han characters is
    not the same as being in Taiwan, but each implies that the other is
    likely.)

    These implications are provided in the CLDR supplemental data, and are
    based on the likelihood of people using the language to transmit text
    on the Internet. (This is why the overall default is English, not
    Chinese.)

    It's important to recognize that these tags amplify majorities, and
    that not all language support fits into a "likely" language tag.

    >>> str(Language.get('zh-Hant').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('zh-TW').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('ja').maximize())
    'ja-Jpan-JP'
    >>> str(Language.get('pt').maximize())
    'pt-Latn-BR'
    >>> str(Language.get('und-Arab').maximize())
    'ar-Arab-EG'
    >>> str(Language.get('und-CH').maximize())
    'de-Latn-CH'

    As many standards are, this is US-centric:

    >>> str(Language.make().maximize())
    'en-Latn-US'

    "Extlangs" have no likely-subtags information, so they will give
    maximized results that make no sense:

    >>> str(Language.get('und-ibe').maximize())
    'en-ibe-Latn-US'
    """
    if self._filled is not None:
        return self._filled
    # Take the first (most specific) broader tag that has likely-subtags
    # data, then overlay this tag's own explicit values on the guess.
    for tag in self.broader_tags():
        if tag in LIKELY_SUBTAGS:
            result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
            result = result.update(self)
            self._filled = result
            return result
    # 'und' should always be in LIKELY_SUBTAGS, so reaching here means the
    # data itself is broken.
    raise RuntimeError(
        "Couldn't fill in likely values. This represents a problem with "
        "the LIKELY_SUBTAGS data."
    )
# Support an old, wordier name for the method
fill_likely_values = maximize
def match_score(self, supported: 'Language') -> int:
    """
    DEPRECATED: use .distance() instead, which uses newer data and is _lower_
    for better matching languages.

    Returns a score from 0 to 100, higher being a better match.
    """
    warnings.warn(
        "`match_score` is deprecated because it's based on deprecated CLDR info. "
        "Use `distance` instead, which is _lower_ for better matching languages. ",
        DeprecationWarning,
    )
    # Invert distance (clamped at 100) into the old 0-100 score scale.
    return 100 - min(self.distance(supported), 100)
def distance(self, supported: 'Language') -> int:
    """
    Suppose that `self` is the language that the user desires, and
    `supported` is a language that is actually supported.

    This method returns a number from 0 to 134 measuring the 'distance'
    between the languages (lower numbers are better). This is not a
    symmetric relation.

    The language distance is not really about the linguistic similarity or
    history of the languages; instead, it's based largely on sociopolitical
    factors, indicating which language speakers are likely to know which
    other languages in the present world. Much of the heuristic is about
    finding a widespread 'world language' like English, Chinese, French, or
    Russian that speakers of a more localized language will accept.

    A version that works on language tags, as strings, is in the function
    `tag_distance`. See that function for copious examples.
    """
    if supported == self:
        return 0
    # CLDR has realized that these matching rules are undermined when the
    # unspecified language 'und' gets maximized to 'en-Latn-US', so this case
    # is specifically not maximized:
    if self.language is None and self.script is None and self.territory is None:
        desired_triple = ('und', 'Zzzz', 'ZZ')
    else:
        # Compare maximized (language, script, territory) triples.
        desired_complete = self.prefer_macrolanguage().maximize()
        desired_triple = (
            desired_complete.language,
            desired_complete.script,
            desired_complete.territory,
        )
    if (
        supported.language is None
        and supported.script is None
        and supported.territory is None
    ):
        supported_triple = ('und', 'Zzzz', 'ZZ')
    else:
        supported_complete = supported.prefer_macrolanguage().maximize()
        supported_triple = (
            supported_complete.language,
            supported_complete.script,
            supported_complete.territory,
        )
    return tuple_distance_cached(desired_triple, supported_triple)
def is_valid(self) -> bool:
    """
    Checks whether the language, script, territory, and variants
    (if present) are all tags that have meanings assigned by IANA.
    For example, 'ja' (Japanese) is a valid tag, and 'jp' is not.

    The data is current as of CLDR 38.1.

    >>> Language.get('ja').is_valid()
    True
    >>> Language.get('jp').is_valid()
    False
    >>> Language.get('en-001').is_valid()
    True
    >>> Language.get('en-000').is_valid()
    False
    >>> Language.get('und').is_valid()
    True
    >>> Language.get('en-GB-oxendict').is_valid()
    True
    >>> Language.get('en-GB-oxenfree').is_valid()
    False

    Of course, you should be prepared to catch a failure to parse the
    language code at all:

    >>> Language.get('C').is_valid()
    Traceback (most recent call last):
    ...
    langcodes.tag_parser.LanguageTagError: Expected a language code, got 'c'
    """
    # Every present subtag must match the VALIDITY pattern.
    subtags = [self.language, self.script, self.territory]
    if self.variants is not None:
        subtags.extend(self.variants)
    return all(
        VALIDITY.match(subtag) for subtag in subtags if subtag is not None
    )
def has_name_data(self) -> bool:
    """
    Return True when we can name languages in this language. Requires
    `language_data` to be installed.

    This is true when the language, or one of its 'broader' versions, is in
    the list of CLDR target languages.

    >>> Language.get('fr').has_name_data()
    True
    >>> Language.get('so').has_name_data()
    True
    >>> Language.get('enc').has_name_data()
    False
    >>> Language.get('und').has_name_data()
    False
    """
    try:
        from language_data.name_data import LANGUAGES_WITH_NAME_DATA
    except ImportError:
        # Fixed: the installation hint is a diagnostic accompanying the
        # re-raised ImportError, so it belongs on stderr, not stdout.
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    matches = set(self.broader_tags()) & LANGUAGES_WITH_NAME_DATA
    return bool(matches)
# These methods help to show what the language tag means in natural
# language. They actually apply the language-matching algorithm to find
# the right language to name things in.
def _get_name(
    self, attribute: str, language: Union[str, 'Language'], max_distance: int
) -> Optional[str]:
    """Look up the display name of one attribute of this tag (language,
    script, or territory) in the given `language`.

    Returns None for a missing non-language attribute; for unknown codes,
    returns a placeholder like "Unknown language subtag [zzz]". Requires
    the optional `language_data` package.
    """
    try:
        from language_data.names import code_to_names
    except ImportError:
        # Fixed: diagnostic accompanying the re-raised ImportError goes to
        # stderr, not stdout. Also fixed the return annotation: this method
        # returns None when a non-language attribute is unset.
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    assert attribute in self.ATTRIBUTES
    if isinstance(language, str):
        language = Language.get(language)
    attr_value = getattr(self, attribute)
    if attr_value is None:
        if attribute == 'language':
            # An unset language is named as 'und' (unknown language).
            attr_value = 'und'
        else:
            return None
    names = code_to_names(attr_value)
    result = self._best_name(names, language, max_distance)
    if result is not None:
        return result
    else:
        # Construct a string like "Unknown language [zzz]"
        placeholder = None
        if attribute == 'language':
            placeholder = 'und'
        elif attribute == 'script':
            placeholder = 'Zzzz'
        elif attribute == 'territory':
            placeholder = 'ZZ'
        unknown_name = None
        if placeholder is not None:
            names = code_to_names(placeholder)
            unknown_name = self._best_name(names, language, max_distance)
        if unknown_name is None:
            unknown_name = 'Unknown language subtag'
        return f'{unknown_name} [{attr_value}]'
def _best_name(
    self, names: Mapping[str, str], language: 'Language', max_distance: int
):
    """
    Pick the entry of `names` whose key best matches `language`, falling
    back on the default language's entry when nothing is close enough.
    """
    candidates = set(language.broader_tags())
    available = [tag for tag in sorted(names) if tag in candidates]
    best_tag, _score = closest_match(language, available, max_distance)
    if best_tag in names:
        return names[best_tag]
    return names.get(DEFAULT_LANGUAGE)
def language_name(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> str:
    """
    Give the name of just the language part of this tag, in a natural
    language. The target language can be given as a string or another
    Language object; English is the default.

    >>> Language.get('fr').language_name()
    'French'
    >>> Language.get('el').language_name()
    'Greek'
    >>> Language.get('fr').language_name('fr')
    'français'
    >>> Language.get('el').language_name('fr')
    'grec'

    Why does everyone get Slovak and Slovenian confused? Let's ask them.

    >>> Language.get('sl').language_name('sl')
    'slovenščina'
    >>> Language.get('sk').language_name('sk')
    'slovenčina'
    >>> Language.get('sl').language_name('sk')
    'slovinčina'
    >>> Language.get('sk').language_name('sl')
    'slovaščina'
    """
    return self._get_name('language', language, max_distance)
def display_name(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> str:
    """
    Describe this Language object *in a language* — much friendlier than an
    inscrutable short code. Names come from the IANA language tag registry
    (English only) plus CLDR, which names languages in many commonly-used
    languages. The default naming language is English:

    >>> Language.make(language='fr').display_name()
    'French'
    >>> Language.make().display_name()
    'Unknown language'
    >>> Language.get('zh-Hans').display_name()
    'Chinese (Simplified)'
    >>> Language.get('en-US').display_name()
    'English (United States)'

    But you can ask for language names in numerous other languages:

    >>> Language.get('fr').display_name('fr')
    'français'
    >>> Language.make().display_name('es')
    'lengua desconocida'
    >>> Language.get('zh-Hans').display_name('de')
    'Chinesisch (Vereinfacht)'
    >>> Language.get('en-US').display_name('zh-Hans')
    '英语(美国)'
    """
    reduced = self.simplify_script()
    target = Language.get(language)
    base_name = reduced.language_name(target, max_distance)

    # Collect clarifying details (script, territory) to append to the name.
    details = []
    if reduced.script is not None:
        details.append(reduced.script_name(target, max_distance))
    if reduced.territory is not None:
        details.append(reduced.territory_name(target, max_distance))

    if not details:
        return base_name
    joined = target._display_separator().join(details)
    return target._display_pattern().format(base_name, joined)
def _display_pattern(self) -> str:
    """
    Return the pattern, according to CLDR, used to attach clarifying
    details to a language name. Cached after the first call.
    """
    # Technically this pattern should be looked up per language.
    # Practically, it's the same in every language except Chinese, where
    # the parentheses are full-width.
    if self._disp_pattern is None:
        if self.distance(Language.get('zh')) <= 25:
            self._disp_pattern = "{0}({1})"
        else:
            self._disp_pattern = "{0} ({1})"
    return self._disp_pattern
def _display_separator(self) -> str:
    """
    Get the symbol that should be used to separate multiple clarifying
    details -- such as a comma in English, or an ideographic comma in
    Japanese. Requires that `language_data` is installed.
    """
    try:
        from language_data.names import DISPLAY_SEPARATORS
    except ImportError:
        # Diagnostics go to stderr so they don't pollute program output
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    if self._disp_separator is not None:
        return self._disp_separator
    matched, _dist = closest_match(self, DISPLAY_SEPARATORS.keys())
    self._disp_separator = DISPLAY_SEPARATORS[matched]
    return self._disp_separator
def autonym(self, max_distance: int = 9) -> str:
    """
    Give the display name of this language *in* this language.
    Requires that `language_data` is installed.

    >>> Language.get('fr').autonym()
    'français'
    >>> Language.get('ja').autonym()
    '日本語'

    Because this uses `display_name()`, the name can include a script or
    territory when appropriate.

    >>> Language.get('en-AU').autonym()
    'English (Australia)'
    >>> Language.get('sr-Latn').autonym()
    'srpski (latinica)'
    >>> Language.get('sr-Cyrl').autonym()
    'српски (ћирилица)'
    >>> Language.get('pa-Arab').autonym()
    'پنجابی (عربی)'

    This only works for language codes that CLDR has locale data for. You
    can't ask for the autonym of 'ja-Latn' and get 'nihongo (rōmaji)'.
    """
    preferred = self.prefer_macrolanguage()
    return preferred.display_name(language=preferred, max_distance=max_distance)
def script_name(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> str:
    """
    Name the script part of this language tag in a natural language.
    Requires that `language_data` is installed.
    """
    return self._get_name('script', language, max_distance)
def territory_name(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> str:
    """
    Name the territory part of this language tag in a natural language.
    Requires that `language_data` is installed.
    """
    return self._get_name('territory', language, max_distance)
def region_name(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> str:
    """Deprecated alias for `territory_name`."""
    warnings.warn(
        "`region_name` has been renamed to `territory_name` for consistency",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller, not this wrapper
    )
    return self.territory_name(language, max_distance)
@property
def region(self):
    """Deprecated alias for the `territory` property."""
    warnings.warn(
        "The `region` property has been renamed to `territory` for consistency",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller, not this wrapper
    )
    return self.territory
def variant_names(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> Sequence[str]:
    """
    Deprecated in version 3.0.

    We don't store names for variants anymore, so this just returns the list
    of variant codes, such as ['oxendict'] for en-GB-oxendict.
    """
    warnings.warn(
        "variant_names is deprecated and just returns the variant codes",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller, not this wrapper
    )
    return self.variants or []
def describe(
    self,
    language: Union[str, 'Language'] = DEFAULT_LANGUAGE,
    max_distance: int = 25,
) -> dict:
    """
    Return a dictionary that describes this language tag in a specified
    natural language. Requires that `language_data` is installed.
    See `language_name` and related methods for more specific versions.

    The desired `language` is matched against the available options using
    the matching technique that this module provides:

    >>> shaw = Language.make(script='Shaw').maximize()
    >>> shaw.describe('en')
    {'language': 'English', 'script': 'Shavian', 'territory': 'United Kingdom'}
    >>> shaw.describe('fr')
    {'language': 'anglais', 'script': 'shavien', 'territory': 'Royaume-Uni'}
    >>> shaw.describe('es')
    {'language': 'inglés', 'script': 'shaviano', 'territory': 'Reino Unido'}
    >>> shaw.describe('zh-Hans')
    {'language': '英语', 'script': '萧伯纳式文', 'territory': '英国'}
    >>> shaw.describe('ja')
    {'language': '英語', 'script': 'ショー文字', 'territory': 'イギリス'}

    When we don't have a localization for the language, we fall back on
    English, because the IANA provides names for all known codes in English.

    >>> shaw.describe('lol')
    {'language': 'English', 'script': 'Shavian', 'territory': 'United Kingdom'}

    When the language tag itself is a valid tag but with no known meaning,
    we say so in the appropriate language.

    >>> Language.get('xyz-ZY').display_name()
    'Unknown language [xyz] (Unknown Region [ZY])'
    >>> Language.get('xyz-ZY').display_name('es')
    'lengua desconocida [xyz] (Región desconocida [ZY])'
    """
    described = {}
    if self.language:
        described['language'] = self.language_name(language, max_distance)
    if self.script:
        described['script'] = self.script_name(language, max_distance)
    if self.territory:
        described['territory'] = self.territory_name(language, max_distance)
    return described
def speaking_population(self) -> int:
    """
    Get an estimate of how many people in the world speak this language,
    derived from CLDR data. Requires that `language_data` is installed.

    Only the language and territory codes are considered. If a territory
    code is included, the population counts only speakers of the language
    in that territory. Script subtags are disregarded, because it doesn't
    make sense to ask how many people *speak* in a particular writing
    script.

    >>> Language.get('es').speaking_population()
    487664083
    >>> Language.get('es-BR').speaking_population()
    76218
    >>> Language.get('pt-BR').speaking_population()
    192661560
    >>> Language.get('vo').speaking_population()
    0
    """
    try:
        from language_data.population_data import LANGUAGE_SPEAKING_POPULATION
    except ImportError:
        # Diagnostics go to stderr so they don't pollute program output
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    lang = self._filter_attributes(['language', 'territory'])
    return LANGUAGE_SPEAKING_POPULATION.get(str(lang), 0)
def writing_population(self) -> int:
    """
    Get an estimate of how many people in the world read and write this
    language, derived from CLDR data. Requires that `language_data` is
    installed.

    For many languages that aren't typically written, this is an
    overestimate, according to CLDR -- the data often includes people who
    speak that language but write in a different language.

    Only the language, script, and territory codes are considered. If a
    territory code is included, the population counts only speakers of the
    language in that territory.

    >>> Language.get('zh-Hant').writing_population()
    37019589
    >>> Language.get('zh-Hant-HK').writing_population()
    6439733

    If you want the total Chinese writing population of Hong Kong, you need
    to avoid the normalization that interprets 'zh-HK' as 'zh-Hant-HK':

    >>> Language.get('zh-HK', normalize=False).writing_population()
    6778666

    Unknown or unspecified language codes get a population of 0.

    >>> Language.get('xyz').writing_population()
    0
    >>> Language.get('und').writing_population()
    0
    """
    try:
        from language_data.population_data import LANGUAGE_WRITING_POPULATION
    except ImportError:
        # Diagnostics go to stderr so they don't pollute program output
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    lang = self._filter_attributes(['language', 'script', 'territory'])
    if str(lang) in LANGUAGE_WRITING_POPULATION:
        return LANGUAGE_WRITING_POPULATION[str(lang)]
    else:
        # Fall back on the tag without a redundant script subtag
        lang = lang.simplify_script()
        return LANGUAGE_WRITING_POPULATION.get(str(lang), 0)
@staticmethod
def find_name(
    tagtype: str, name: str, language: Optional[Union[str, 'Language']] = None
) -> 'Language':
    """
    Find the subtag of a particular `tagtype` that has the given `name`.
    Requires that `language_data` is installed.

    The default language, "und", will allow matching names in any language,
    so you can get the code 'fr' by looking up "French", "Français", or
    "francés".

    Occasionally, names are ambiguous in a way that can be resolved by
    specifying what language the name is *in*: there is a language named
    'Malayo' in English, but it's different from the language named
    'Malayo' in Spanish (which is Malay). Specifying the language is not
    required, but it helps names round-trip.

    >>> Language.find_name('language', 'francés')
    Language.make(language='fr')
    >>> Language.find_name('territory', 'United Kingdom')
    Language.make(territory='GB')
    >>> Language.find_name('script', 'Arabic')
    Language.make(script='Arab')
    >>> Language.find_name('language', 'norsk', 'en')
    Traceback (most recent call last):
        ...
    LookupError: Can't find any language named 'norsk'
    >>> Language.find_name('language', 'norsk', 'no')
    Language.make(language='no')
    >>> Language.find_name('language', 'malayo', 'en')
    Language.make(language='mbp')
    >>> Language.find_name('language', 'malayo', 'es')
    Language.make(language='ms')

    Some language names resolve to more than a language. For example, the
    name 'Brazilian Portuguese' resolves to a language and a territory, and
    'Simplified Chinese' resolves to a language and a script. In these
    cases, a Language object with multiple subtags will be returned.

    >>> Language.find_name('language', 'Brazilian Portuguese', 'en')
    Language.make(language='pt', territory='BR')
    >>> Language.find_name('language', 'Simplified Chinese', 'en')
    Language.make(language='zh', script='Hans')

    A small amount of fuzzy matching is supported: if the name can be
    shortened to match a single language name, you get that language.
    This allows, for example, "Hakka dialect" to match "Hakka".

    >>> Language.find_name('language', 'Hakka dialect')
    Language.make(language='hak')
    """
    try:
        from language_data.names import name_to_code
    except ImportError:
        # Diagnostics go to stderr so they don't pollute program output
        print(LANGUAGE_NAME_IMPORT_MESSAGE, file=sys.stderr)
        raise
    # No matter what form of language we got, normalize it to a single
    # language subtag
    if isinstance(language, Language):
        language = language.language
    elif isinstance(language, str):
        language = get(language).language
    if language is None:
        language = 'und'
    code = name_to_code(tagtype, name, language)
    if code is None:
        raise LookupError(f"Can't find any {tagtype} named {name!r}")
    if '-' in code:
        return Language.get(code)
    else:
        data = {tagtype: code}
        return Language.make(**data)
@staticmethod
def find(
    name: str, language: Optional[Union[str, 'Language']] = None
) -> 'Language':
    """
    A concise version of `find_name`, used to get a language tag by its
    name in a natural language. The language can be omitted in the large
    majority of cases, where the language name is not ambiguous.

    >>> Language.find('Türkçe')
    Language.make(language='tr')
    >>> Language.find('brazilian portuguese')
    Language.make(language='pt', territory='BR')
    >>> Language.find('simplified chinese')
    Language.make(language='zh', script='Hans')

    Some language names are ambiguous: for example, there is a language
    named 'Fala' in English (with code 'fax'), but 'Fala' is also the
    Kwasio word for French. In this case, specifying the language that
    the name is in is necessary for disambiguation.

    >>> Language.find('fala')
    Language.make(language='fr')
    >>> Language.find('fala', 'nmg')
    Language.make(language='fr')
    >>> Language.find('fala', 'en')
    Language.make(language='fax')
    """
    return Language.find_name('language', name, language)
def to_dict(self) -> dict:
    """
    Get a dictionary of the attributes of this Language object, which
    can be useful for constructing a similar object. The result is cached
    on the instance after the first call.
    """
    if self._dict is None:
        self._dict = {
            key: getattr(self, key)
            for key in self.ATTRIBUTES
            if getattr(self, key)
        }
    return self._dict
def update(self, other: 'Language') -> 'Language':
    """
    Return a new Language in which every field set on `other` overrides
    the corresponding field of this one.
    """
    merged = {
        attr: getattr(other, attr) or getattr(self, attr)
        for attr in (
            'language', 'extlangs', 'script', 'territory',
            'variants', 'extensions', 'private',
        )
    }
    return Language.make(**merged)
def update_dict(self, newdata: dict) -> 'Language':
    """
    Return a new Language whose attributes are this one's, overridden by
    any entries present in `newdata`.
    """
    fields = {
        attr: newdata.get(attr, getattr(self, attr))
        for attr in (
            'language', 'extlangs', 'script', 'territory',
            'variants', 'extensions', 'private',
        )
    }
    return Language.make(**fields)
@staticmethod
def _filter_keys(d: dict, keys: Iterable[str]) -> dict:
    """Return the subset of `d` whose keys appear in `keys`."""
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result
def _filter_attributes(self, keyset: Iterable[str]) -> 'Language':
    """Return a copy of this object with only a subset of its attributes set."""
    kept = Language._filter_keys(self.to_dict(), keyset)
    return Language.make(**kept)
def _searchable_form(self) -> 'Language':
    """
    Convert a parsed language tag so that the information it contains is in
    the best form for looking up information in the CLDR. The result is
    cached on the instance.
    """
    if self._searchable is None:
        trimmed = self._filter_attributes({'language', 'script', 'territory'})
        self._searchable = trimmed.simplify_script().prefer_macrolanguage()
    return self._searchable
def __eq__(self, other):
    """Two Language objects are equal when their string tags are equal."""
    if self is other:
        return True
    if not isinstance(other, Language):
        # Let the other operand attempt its own (reflected) comparison
        # instead of unilaterally declaring inequality.
        return NotImplemented
    return self._str_tag == other._str_tag
def __hash__(self) -> int:
    # The hash must agree with __eq__, which compares _str_tag. Hashing
    # id(self) would give equal-but-distinct instances different hashes,
    # breaking use as dict keys / set members.
    return hash(self._str_tag)
def __getitem__(self, key: str) -> Optional[Union[str, List[str]]]:
    """Allow dictionary-style access to the recognized attributes."""
    if key not in self.ATTRIBUTES:
        raise KeyError(key)
    return getattr(self, key)
def __contains__(self, key: str) -> bool:
    # Coerce to bool so the declared return type holds; previously this
    # could return the attribute's value itself (e.g. a string or list).
    return key in self.ATTRIBUTES and bool(getattr(self, key))
def __repr__(self) -> str:
    """Show a constructor-style representation of this Language."""
    parts = []
    for attr in self.ATTRIBUTES:
        value = getattr(self, attr)
        if value:
            parts.append(f'{attr}={value!r}')
    return "Language.make({})".format(', '.join(parts))
def __str__(self) -> str:
    # The string form of a Language is its tag, as produced by to_tag().
    return self.to_tag()
# Make the get(), find(), and find_name() functions available at the top level
# of the module, so callers can write e.g. `langcodes.get('en-US')`.
get = Language.get
find = Language.find
find_name = Language.find_name
# Make the Language object available under the old name LanguageData,
# for backward compatibility with earlier releases.
LanguageData = Language
def standardize_tag(tag: Union[str, Language], macro: bool = False) -> str:
    """
    Standardize a language tag:

    - Replace deprecated values with their updated versions (if those exist)
    - Remove script tags that are redundant with the language
    - If *macro* is True, use a macrolanguage to represent the most common
      standardized language within it ('cmn' becomes 'zh', 'arb' becomes
      'ar'). This isn't required by BCP 47, but it is required by the
      Unicode CLDR.
    - Format the result according to the conventions of BCP 47

    >>> standardize_tag('en_US')
    'en-US'
    >>> standardize_tag('en-Latn')
    'en'
    >>> standardize_tag('en-uk')
    'en-GB'
    >>> standardize_tag('eng')
    'en'
    >>> standardize_tag('arb-Arab', macro=True)
    'ar'
    >>> standardize_tag('sh-QU')
    'sr-Latn-EU'
    >>> standardize_tag('sgn-US')
    'ase'
    >>> standardize_tag('zh-cmn-hans-cn')
    'cmn-Hans-CN'
    >>> standardize_tag('zh-cmn-hans-cn', macro=True)
    'zh-Hans-CN'
    >>> standardize_tag('ja-latn-hepburn')
    'ja-Latn-hepburn'
    >>> standardize_tag('spa-latn-mx')
    'es-MX'

    If the tag can't be parsed according to BCP 47, this will raise a
    LanguageTagError (a subclass of ValueError):

    >>> standardize_tag('spa-mx-latn')
    Traceback (most recent call last):
        ...
    langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
    """
    parsed = Language.get(tag, normalize=True)
    if macro:
        parsed = parsed.prefer_macrolanguage()
    return parsed.simplify_script().to_tag()
def tag_match_score(
    desired: Union[str, Language], supported: Union[str, Language]
) -> int:
    """
    DEPRECATED: use .distance() instead, which uses newer data and is _lower_
    for better matching languages.

    Return a number from 0 to 100 indicating the strength of match between
    the language the user desires, D, and a supported language, S. Higher
    numbers are better; a reasonable cutoff is 75 or more. A score of 100
    means the languages are the same, possibly after normalizing and
    filling in likely values.
    """
    warnings.warn(
        "tag_match_score is deprecated because it's based on deprecated CLDR info. "
        "Use tag_distance instead, which is _lower_ for better matching languages. ",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller
    )
    desired_ld = Language.get(desired)
    supported_ld = Language.get(supported)
    return desired_ld.match_score(supported_ld)
def tag_distance(desired: Union[str, Language], supported: Union[str, Language]) -> int:
    """
    Return the CLDR-derived distance between a desired language tag and a
    supported one. Lower is better; 0 means the tags expand to the same
    thing when likely values are filled in.

    >>> tag_distance('en', 'en-US')
    0
    >>> tag_distance('zh-Hant', 'zh-TW')
    0
    >>> tag_distance('nb', 'no')  # Norwegian is about the same as Norwegian Bokmål
    0
    >>> tag_distance('sh', 'sr-Latn')  # equivalent in CLDR
    0

    Distances can be asymmetrical: CLDR records that speakers of Swiss
    German (gsw) know High German (de), but not the other way around.

    >>> tag_distance('gsw', 'de')
    8
    >>> tag_distance('de', 'gsw')
    84

    Different local variants of the same language get distances from 1 to 5:

    >>> tag_distance('en-AU', 'en-GB')
    3
    >>> tag_distance('es-PE', 'es-419')
    1
    >>> tag_distance('en-US', 'en-GB')
    5
    >>> tag_distance('pt', 'pt-PT')
    5
    >>> tag_distance('sr-Latn', 'sr-Cyrl')
    5

    A distance of 10 matches a specific language to its more-commonly-used
    macrolanguage tag:

    >>> tag_distance('arz', 'ar')  # Egyptian Arabic to Modern Standard Arabic
    10
    >>> tag_distance('wuu', 'zh')  # Wu Chinese to (Mandarin) Chinese
    10

    Contentious script differences, such as Simplified vs. Traditional
    Chinese, or Bokmål vs. Nynorsk vs. Danish, score higher:

    >>> tag_distance('zh-Hans', 'zh-Hant')
    19
    >>> tag_distance('zh-Hant', 'zh-Hans')
    23
    >>> tag_distance('no', 'da')
    12
    >>> tag_distance('no', 'nn')
    20

    Distances of 20 to 50 represent substantially different languages where
    speakers of the first may understand the second for demographic reasons:

    >>> tag_distance('eu', 'es')  # Basque to Spanish
    20
    >>> tag_distance('af', 'nl')  # Afrikaans to Dutch
    24
    >>> tag_distance('ms', 'id')  # Malay to Indonesian
    34
    >>> tag_distance('ta', 'en')  # Tamil to English
    44

    When the supported script differs from the desired one, that is usually
    a major difference with a score of 50 or more:

    >>> tag_distance('ja', 'ja-Latn-US-hepburn')
    54
    >>> tag_distance('en', 'en-Shaw')
    54

    Unconnected languages get a distance of 80 to 134:

    >>> tag_distance('es', 'fr')
    84
    >>> tag_distance('en', 'zh')
    134
    """
    desired_obj = Language.get(desired)
    supported_obj = Language.get(supported)
    return desired_obj.distance(supported_obj)
def best_match(
    desired_language: Union[str, Language],
    supported_languages: Sequence[str],
    min_score: int = 75,
) -> Tuple[str, int]:
    """
    DEPRECATED: use .closest_match() instead. This function emulates the
    old matching behavior by subtracting the language distance from 100.

    Choose the best of `supported_languages` for a user who wants
    `desired_language`, even without an exact match.

    Returns:

    - The best-matching language code, one of the `supported_languages`
      or 'und'
    - The score of the match, from 0 to 100; higher is better.

    `min_score` sets the minimum match score. If every language matches
    with a lower score, the result is 'und' with a score of 0.
    """
    supported, distance = closest_match(
        desired_language, supported_languages, 100 - min_score
    )
    return supported, max(0, 100 - distance)
def closest_match(
    desired_language: Union[str, Language],
    supported_languages: Sequence[str],
    max_distance: int = 25,
) -> Tuple[str, int]:
    """
    Choose the best of `supported_languages` for a user who wants
    `desired_language`, even without an exact match.

    Returns:

    - The best-matching language code, one of the `supported_languages`
      or 'und' for no match
    - The distance of the match: 0 for a perfect match, increasing from
      there (see `tag_distance`)

    `max_distance` sets the maximum match distance; if every match is
    farther than that, the result is 'und' with a distance of 1000. The
    default of 25 is reasonable; raising it can cause data to be processed
    in significantly the wrong language. When several languages tie for
    the best match, the first one wins.
    """
    desired = str(desired_language)

    # Fast path: the desired language is supported verbatim.
    if desired in supported_languages:
        return desired, 0

    # Try again with the tag reduced to a standard form.
    desired = standardize_tag(desired)
    if desired in supported_languages:
        return desired, 0

    candidates = []
    for supported in supported_languages:
        dist = tag_distance(desired, supported)
        if dist <= max_distance:
            candidates.append((supported, dist))
    # The fallback comes last, so ties are won by real matches; min() keeps
    # the first of any tied candidates, like a stable sort would.
    candidates.append(('und', 1000))
    return min(candidates, key=itemgetter(1))
|
LuminosoInsight/langcodes
|
langcodes/__init__.py
|
Python
|
mit
| 65,962
|
[
"ASE"
] |
4788164f1f9e7cff4e131d167b023649f832d0654a4df2b86a69014ae30277d4
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================
import re
from typing import Optional, Union, Dict, Any, List, Tuple, Sequence, TYPE_CHECKING
from sys import stderr as system_error_stream
import numpy as np
try:
from numpy.typing import ArrayLike
except ImportError:
from numpy import ndarray as ArrayLike
import warnings
from sys import stderr as system_error_stream
import os
import builtins
fileiotype = Union[str, bytes, os.PathLike]
import itk.support.types as itkt
if TYPE_CHECKING:
try:
import xarray as xr
except ImportError:
pass
try:
import vtk
except ImportError:
pass
# Public names exported by this module: convenience helpers for ITK images,
# meshes, and transforms, plus NumPy / VTK / xarray conversion utilities and
# filter/pipeline introspection tools.
__all__ = [
    "output",
    "image",
    "set_nthreads",
    "get_nthreads",
    "echo",
    "size",
    "physical_size",
    "spacing",
    "origin",
    "index",
    "region",
    "GetArrayFromImage",
    "array_from_image",
    "GetArrayViewFromImage",
    "array_view_from_image",
    "GetImageFromArray",
    "image_from_array",
    "GetImageViewFromArray",
    "image_view_from_array",
    "array_from_vector_container",
    "array_view_from_vector_container",
    "vector_container_from_array",
    "GetArrayFromVnlVector",
    "array_from_vnl_vector",
    "GetVnlVectorFromArray",
    "vnl_vector_from_array",
    "GetArrayViewFromVnlVector",
    "array_view_from_vnl_vector",
    "GetVnlMatrixFromArray",
    "vnl_matrix_from_array",
    "GetArrayFromVnlMatrix",
    "array_from_vnl_matrix",
    "GetArrayViewFromVnlMatrix",
    "array_view_from_vnl_matrix",
    "GetArrayFromMatrix",
    "array_from_matrix",
    "GetMatrixFromArray",
    "matrix_from_array",
    "xarray_from_image",
    "image_from_xarray",
    "vtk_image_from_image",
    "image_from_vtk_image",
    "image_intensity_min_max",
    "imwrite",
    "imread",
    "meshwrite",
    "meshread",
    "transformwrite",
    "transformread",
    "search",
    "set_inputs",
    "templated_class",
    "pipeline",
    "auto_pipeline",
    "down_cast",
    "template",
    "class_",
    "ctype",
    "python_type",
    "range",
    "TemplateTypeError",
]
def output(input):
    """
    Return `input.GetOutput()` when the object provides a `GetOutput`
    attribute (i.e. it behaves like a filter); otherwise return `input`
    unchanged.
    """
    get_output = getattr(input, "GetOutput", None)
    if get_output is not None:
        return get_output()
    return input
def image(input):
    """Deprecated alias: use itk.output() instead."""
    warnings.warn(
        "WrapITK warning: itk.image() is deprecated. Use itk.output() instead."
    )
    return output(input)
def set_nthreads(number_of_threads: int) -> None:
    """
    Set the global default number of threads used by ITK.

    Use example (in python):

        import itk
        itk.set_nthreads(4) ## use 4 threads
    """
    assert number_of_threads > 0, (
        "Please set a positive number of threads instead of %d" % number_of_threads
    )
    import itk

    itk.MultiThreaderBase.New().SetGlobalDefaultNumberOfThreads(number_of_threads)
def get_nthreads() -> int:
    """Return the global default number of threads used by ITK."""
    import itk

    return itk.MultiThreaderBase.New().GetGlobalDefaultNumberOfThreads()
def echo(obj, f=system_error_stream) -> None:
    """Print *obj* to the stream *f* (defaults to the module's error stream).

    Bug fix: the previous body was ``print(f, obj)``, which wrote the repr of
    the *stream object itself* followed by *obj* to standard output instead of
    writing *obj* to *f*. ``file=f`` is the documented intent.

    NOTE(review): the docstring historically claimed an object's ``Print()``
    method would be used when available, but no such dispatch was ever
    implemented; ``print`` formatting (``str``/``repr``) is used for every
    object.
    """
    print(obj, file=f)
def size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
    """Return the size of an image, or of a filter's output image.

    Only the pipeline metadata is refreshed (``UpdateOutputInformation``);
    the pixel buffer itself is not updated.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion().GetSize()
def physical_size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the physical extent (spacing * size) along each dimension.

    Updates the needed pipeline information of the input as a side effect.
    """
    # `range` is shadowed by this module's own range(); use the builtin.
    from builtins import range

    spacing_values = spacing(image_or_filter)
    size_values = size(image_or_filter)
    return [
        spacing_values.GetElement(d) * size_values.GetElement(d)
        for d in range(spacing_values.Size())
    ]
def spacing(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the spacing of an image, or of a filter's output image.

    Only the pipeline metadata is refreshed; the pixel buffer is not updated.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetSpacing()
def origin(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the origin of an image, or of a filter's output image.

    Only the pipeline metadata is refreshed; the pixel buffer is not updated.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetOrigin()
def index(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
    """Return the start index of an image, or of a filter's output image.

    Only the pipeline metadata is refreshed; the pixel buffer is not updated.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion().GetIndex()
def region(image_or_filter: "itkt.ImageOrImageSource") -> "itkt.ImageRegion":
    """Return the largest possible region of an image or filter output.

    Only the pipeline metadata is refreshed; the pixel buffer is not updated.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion()
def _get_itk_pixelid(numpy_array_type):
    """Return the ITK pixel type (e.g. ``itk.UC``, ``itk.F``) matching the
    dtype of a numpy array.

    Falls back to ``np.issubdtype`` matching when the exact dtype is not a
    key of the lookup table, and re-raises the original ``KeyError`` when no
    compatible entry exists.
    """
    import itk
    # This is a Mapping from numpy array types to itk pixel types.
    _np_itk = {
        np.uint8: itk.UC,
        np.uint16: itk.US,
        np.uint32: itk.UI,
        np.uint64: itk.UL,
        np.int8: itk.SC,
        np.int16: itk.SS,
        np.int32: itk.SI,
        np.int64: itk.SL,
        np.float32: itk.F,
        np.float64: itk.D,
        np.complex64: itk.complex[itk.F],
        np.complex128: itk.complex[itk.D],
    }
    try:
        return _np_itk[numpy_array_type.dtype.type]
    except KeyError as e:
        # Exact dtype not in the table: accept any compatible dtype (e.g.
        # platform-specific aliases) via numpy's dtype hierarchy.
        for key in _np_itk:
            if np.issubdtype(numpy_array_type.dtype.type, key):
                return _np_itk[key]
        raise e
def _GetArrayFromImage(
    image_or_filter, function_name: str, keep_axes: bool, update: bool, ttype
) -> np.ndarray:
    """Get an Array with the content of the image buffer.

    ``function_name`` selects the itk.PyBuffer conversion to apply (copy or
    view variant); ``ttype`` optionally forces the ITK image type used for
    the template lookup (a bare type or a 1-element tuple/list).
    """
    # Finds the image type
    import itk
    img = itk.output(image_or_filter)
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            ImageType = ttype[0]
        else:
            ImageType = ttype
    else:
        # No explicit type requested: use the runtime type of the image.
        ImageType = img.__class__
    # itk.PyBuffer is keyed by (ImageType,); find the matching instantiation.
    keys = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    # Create a numpy array of the type of the input image
    templatedFunction = getattr(itk.PyBuffer[keys[0]], function_name)
    return templatedFunction(img, keep_axes, update)
def GetArrayFromImage(
    image_or_filter: "itkt.ImageOrImageSource",
    keep_axes: bool = False,
    update: bool = True,
    ttype=None,
) -> np.ndarray:
    """Get an array with the content of the image buffer.

    ``keep_axes`` preserves the ITK index order instead of the numpy order;
    ``update`` triggers a pipeline update first; ``ttype`` optionally forces
    the ITK image type used for the conversion.
    """
    return _GetArrayFromImage(
        image_or_filter, "GetArrayFromImage", keep_axes, update, ttype
    )
# snake_case alias for the CamelCase function above.
array_from_image = GetArrayFromImage
def GetArrayViewFromImage(
    image_or_filter: "itkt.ImageOrImageSource",
    keep_axes: bool = False,
    update: bool = True,
    ttype=None,
) -> np.ndarray:
    """Get an array view with the content of the image buffer.

    Same parameters as :func:`GetArrayFromImage`, but dispatches to the
    view variant of the itk.PyBuffer conversion.
    """
    return _GetArrayFromImage(
        image_or_filter, "GetArrayViewFromImage", keep_axes, update, ttype
    )
# snake_case alias for the CamelCase function above.
array_view_from_image = GetArrayViewFromImage
def _GetImageFromArray(arr: ArrayLike, function_name: str, is_vector: bool, ttype):
    """Get an ITK image from a Python array.

    ``function_name`` selects the itk.PyBuffer conversion (copy or view
    variant). When ``ttype`` is given, the target image type is taken from it
    and ``is_vector`` is deduced from that type (passing both is an error);
    otherwise pixel type and dimension are deduced from the array itself.
    """
    import itk
    # Verify inputs
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    if ttype is not None:
        if is_vector:
            raise RuntimeError("Cannot specify both `is_vector` and `ttype`.")
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            ImageType = ttype[0]
        else:
            ImageType = ttype
        if type(itk.template(ImageType)) != tuple or len(itk.template(ImageType)) < 2:
            raise RuntimeError("Cannot determine pixel type from supplied ttype.")
        # A non-scalar pixel type or a VectorImage template means vector data.
        is_vector = (
            type(itk.template(ImageType)[1][0]) != itk.support.types.itkCType
            or itk.template(ImageType)[0] == itk.VectorImage
        )
    else:
        PixelType = _get_itk_pixelid(arr)
        Dimension = arr.ndim
        if is_vector:
            # The component axis is not an image dimension.
            Dimension = arr.ndim - 1
            # Component axis position depends on the memory layout.
            if arr.flags["C_CONTIGUOUS"]:
                VectorDimension = arr.shape[-1]
            else:
                VectorDimension = arr.shape[0]
            # 3/4-component uint8 data maps to RGB/RGBA pixels.
            if PixelType == itk.UC:
                if VectorDimension == 3:
                    ImageType = itk.Image[itk.RGBPixel[itk.UC], Dimension]
                elif VectorDimension == 4:
                    ImageType = itk.Image[itk.RGBAPixel[itk.UC], Dimension]
                else:
                    ImageType = itk.VectorImage[PixelType, Dimension]
            else:
                ImageType = itk.VectorImage[PixelType, Dimension]
        else:
            ImageType = itk.Image[PixelType, Dimension]
    # itk.PyBuffer is keyed by (ImageType,); find the matching instantiation.
    keys = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
    if len(keys) == 0:
        raise RuntimeError(
            """No suitable template parameter can be found.
Please specify an output type via the 'ttype' keyword parameter."""
        )
    templatedFunction = getattr(itk.PyBuffer[keys[0]], function_name)
    return templatedFunction(arr, is_vector)
def GetImageFromArray(
    arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
    """Get an ITK image from a Python array.

    ``is_vector`` treats the last (or first, depending on layout) axis as
    pixel components; ``ttype`` optionally forces the output image type.
    """
    return _GetImageFromArray(arr, "GetImageFromArray", is_vector, ttype)
# snake_case alias for the CamelCase function above.
image_from_array = GetImageFromArray
def GetImageViewFromArray(
    arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
    """Get an ITK image view from a Python array.

    Same parameters as :func:`GetImageFromArray`, but dispatches to the
    view variant of the itk.PyBuffer conversion.
    """
    return _GetImageFromArray(arr, "GetImageViewFromArray", is_vector, ttype)
# snake_case alias for the CamelCase function above.
image_view_from_array = GetImageViewFromArray
def array_from_vector_container(
    container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
    """Get an Array with the content of the vector container.

    ``ttype`` optionally forces the element (data) type used for the
    itk.PyVectorContainer template lookup.
    """
    import itk
    container_template = itk.template(container)
    IndexType = container_template[1][0]
    # Find container data type
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            DataType = ttype[0]
        else:
            DataType = ttype
    else:
        DataType = container_template[1][1]
    # itk.PyVectorContainer is keyed by (IndexType, DataType).
    keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    # Create numpy array of the type of the input container
    return itk.PyVectorContainer[keys[0]].array_from_vector_container(container)
def array_view_from_vector_container(
    container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
    """Get an Array view with the content of the vector container.

    ``ttype`` optionally forces the element (data) type used for the
    itk.PyVectorContainer template lookup.
    """
    import itk
    container_template = itk.template(container)
    IndexType = container_template[1][0]
    # Find container type
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            DataType = ttype[0]
        else:
            DataType = ttype
    else:
        DataType = container_template[1][1]
    # itk.PyVectorContainer is keyed by (IndexType, DataType).
    keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    # Create a numpy array view over the input container's buffer.
    return itk.PyVectorContainer[keys[0]].array_view_from_vector_container(container)
def vector_container_from_array(arr: ArrayLike, ttype=None) -> "itkt.VectorContainer":
    """Get a vector container from a Python array.

    ``ttype`` optionally forces the element (data) type; otherwise it is
    deduced from the array dtype.
    """
    import itk
    # Verify inputs
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    # Return VectorContainer with 64-bit index type
    IndexType = itk.ULL
    # Find container type
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            DataType = ttype[0]
        else:
            DataType = ttype
    else:
        DataType = _get_itk_pixelid(arr)
    # itk.PyVectorContainer is keyed by (IndexType, DataType).
    keys = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    # Build the container from the numpy array's contents.
    return itk.PyVectorContainer[keys[0]].vector_container_from_array(arr)
def _GetArrayFromVnlObject(vnl_object, function_name: str, ttype) -> np.ndarray:
    """Get an array with the content of vnl_object.

    ``function_name`` selects the itk.PyVnl conversion to apply (vector or
    matrix, copy or view variant); ``ttype`` optionally forces the pixel
    type used for the template lookup.
    """
    # Finds the vnl object type
    import itk
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            PixelType = ttype[0]
        else:
            PixelType = ttype
    else:
        PixelType = itk.template(vnl_object)[1][0]
    # itk.PyVnl is keyed by (PixelType,); find the matching instantiation.
    keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    # Create a numpy array of the type of the vnl object
    templatedFunction = getattr(itk.PyVnl[keys[0]], function_name)
    return templatedFunction(vnl_object)
def GetArrayFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
    """Get an array with the content of vnl_vector.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetArrayFromVnlObject(vnl_vector, "GetArrayFromVnlVector", ttype)
# snake_case alias for the CamelCase function above.
array_from_vnl_vector = GetArrayFromVnlVector
def GetArrayViewFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
    """Get an array view of vnl_vector.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetArrayFromVnlObject(vnl_vector, "GetArrayViewFromVnlVector", ttype)
# Bug fix: this snake_case alias previously pointed at GetArrayFromVnlVector
# (the copying variant), silently returning a copy instead of a view.
array_view_from_vnl_vector = GetArrayViewFromVnlVector
def GetArrayFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
    """Get an array with the content of vnl_matrix.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetArrayFromVnlObject(vnl_matrix, "GetArrayFromVnlMatrix", ttype)
# snake_case alias for the CamelCase function above.
array_from_vnl_matrix = GetArrayFromVnlMatrix
def GetArrayViewFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
    """Get an array view of vnl_matrix.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetArrayFromVnlObject(vnl_matrix, "GetArrayViewFromVnlMatrix", ttype)
# snake_case alias for the CamelCase function above.
array_view_from_vnl_matrix = GetArrayViewFromVnlMatrix
def _GetVnlObjectFromArray(arr: ArrayLike, function_name: str, ttype):
    """Get a vnl object from a Python array.

    ``function_name`` selects the itk.PyVnl conversion (vector or matrix);
    ``ttype`` optionally forces the pixel type, otherwise it is deduced
    from the array dtype.
    """
    import itk
    # Verify inputs
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    if ttype is not None:
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            PixelType = ttype[0]
        else:
            PixelType = ttype
    else:
        PixelType = _get_itk_pixelid(arr)
    # itk.PyVnl is keyed by (PixelType,); find the matching instantiation.
    keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
    if len(keys) == 0:
        raise RuntimeError("No suitable template parameter can be found.")
    templatedFunction = getattr(itk.PyVnl[keys[0]], function_name)
    return templatedFunction(arr)
def GetVnlVectorFromArray(arr: ArrayLike, ttype=None):
    """Get a vnl vector from a Python array.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray", ttype)
# snake_case alias for the CamelCase function above.
vnl_vector_from_array = GetVnlVectorFromArray
def GetVnlMatrixFromArray(arr: ArrayLike, ttype=None):
    """Get a vnl matrix from a Python array.

    ``ttype`` optionally forces the pixel type used for the conversion.
    """
    return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray", ttype)
# snake_case alias for the CamelCase function above.
vnl_matrix_from_array = GetVnlMatrixFromArray
def GetArrayFromMatrix(itk_matrix) -> np.ndarray:
    """Get a numpy array with the content of an itk.Matrix."""
    # Go through the matrix's vnl representation for the conversion.
    return GetArrayFromVnlMatrix(itk_matrix.GetVnlMatrix().as_matrix())
# snake_case alias for the CamelCase function above.
array_from_matrix = GetArrayFromMatrix
def GetMatrixFromArray(arr: ArrayLike) -> "itkt.Matrix":
    """Get an itk.Matrix from a 2D Python array.

    The matrix's pixel type and row/column template parameters are deduced
    from the array's dtype and shape.
    """
    import itk
    # Build the matrix from the array's vnl representation.
    vnl_matrix = GetVnlMatrixFromArray(arr)
    dims = arr.shape
    PixelType = _get_itk_pixelid(arr)
    m = itk.Matrix[PixelType, dims[0], dims[1]](vnl_matrix)
    return m
# snake_case alias for the CamelCase function above.
matrix_from_array = GetMatrixFromArray
def xarray_from_image(l_image: "itkt.ImageOrImageSource") -> "xr.DataArray":
    """Convert an itk.Image to an xarray.DataArray.

    Origin and spacing metadata is preserved in the xarray's coords. The
    Direction is set in the `direction` attribute.
    Dims are labeled as `x`, `y`, `z`, `t`, and `c`.

    This interface and behavior is experimental and is subject to possible
    future changes.

    Fixes: removed a duplicate ``direction = np.flip(...)`` computation whose
    first result was overwritten before use, and corrected the local type
    annotation (``image_dims`` is a 4-tuple, not a 3-tuple).
    """
    import xarray as xr
    import itk
    import numpy as np
    array_view = itk.array_view_from_image(l_image)
    l_spacing = itk.spacing(l_image)
    l_origin = itk.origin(l_image)
    l_size = itk.size(l_image)
    image_dimension = l_image.GetImageDimension()
    image_dims: Tuple[str, str, str, str] = ("x", "y", "z", "t")
    # Build a coordinate axis (origin + k * spacing) for each image dim.
    coords = {}
    for l_index, dim in enumerate(image_dims[:image_dimension]):
        coords[dim] = np.linspace(
            l_origin[l_index],
            l_origin[l_index] + (l_size[l_index] - 1) * l_spacing[l_index],
            l_size[l_index],
            dtype=np.float64,
        )
    # numpy index order is the reverse of ITK's x,y,z order.
    dims = list(reversed(image_dims[:image_dimension]))
    components = l_image.GetNumberOfComponentsPerPixel()
    if components > 1:
        # Pixel components become a trailing 'c' dimension.
        dims.append("c")
        coords["c"] = np.arange(components, dtype=np.uint32)
    direction = np.flip(itk.array_from_matrix(l_image.GetDirection()))
    attrs = {"direction": direction}
    # Carry over remaining image metadata, excluding the geometric keys
    # that are already encoded in coords/attrs.
    metadata = dict(l_image)
    ignore_keys = set(["direction", "origin", "spacing"])
    for key in metadata:
        if key not in ignore_keys:
            attrs[key] = metadata[key]
    data_array = xr.DataArray(array_view, dims=dims, coords=coords, attrs=attrs)
    return data_array
def image_from_xarray(data_array: "xr.DataArray") -> "itkt.ImageBase":
    """Convert an xarray.DataArray to an itk.Image.

    Metadata encoded with xarray_from_image is applied to the itk.Image.

    This interface and behavior is experimental and is subject to possible
    future changes.
    """
    import numpy as np
    import itk
    # Only the canonical dim labels are supported.
    if not {"t", "z", "y", "x", "c"}.issuperset(data_array.dims):
        raise ValueError('Unsupported dims, supported dims: "t", "z", "y", "x", "c".')
    image_dims = list({"t", "z", "y", "x"}.intersection(set(data_array.dims)))
    # Reverse-sorted labels give the numpy (t, z, y, x) axis order.
    image_dims.sort(reverse=True)
    image_dimension = len(image_dims)
    ordered_dims = ("t", "z", "y", "x")[-image_dimension:]
    is_vector = "c" in data_array.dims
    if is_vector:
        ordered_dims = ordered_dims + ("c",)
    values = data_array.values
    if ordered_dims != data_array.dims:
        # Axes are not in canonical order: permute them before conversion.
        # `builtins.range` because this module shadows range().
        dest = list(builtins.range(len(ordered_dims)))
        source = dest.copy()
        for ii in builtins.range(len(ordered_dims)):
            source[ii] = data_array.dims.index(ordered_dims[ii])
        values = np.moveaxis(values, source, dest).copy()
    itk_image = itk.image_view_from_array(values, is_vector=is_vector)
    l_origin = [0.0] * image_dimension
    l_spacing = [1.0] * image_dimension
    # Recover origin and spacing from each (assumed uniform) coordinate axis.
    for l_index, dim in enumerate(image_dims):
        coords = data_array.coords[dim]
        if coords.shape[0] > 1:
            l_origin[l_index] = float(coords[0])
            l_spacing[l_index] = float(coords[1]) - float(coords[0])
    # ITK expects x,y,z order, the reverse of the numpy dim order above.
    l_spacing.reverse()
    itk_image.SetSpacing(l_spacing)
    l_origin.reverse()
    itk_image.SetOrigin(l_origin)
    if "direction" in data_array.attrs:
        direction = data_array.attrs["direction"]
        # Undo the flip applied by xarray_from_image.
        itk_image.SetDirection(np.flip(direction))
    # Restore remaining metadata, skipping the geometric keys handled above.
    ignore_keys = set(["direction", "origin", "spacing"])
    for key in data_array.attrs:
        if not key in ignore_keys:
            itk_image[key] = data_array.attrs[key]
    return itk_image
def vtk_image_from_image(l_image: "itkt.ImageOrImageSource") -> "vtk.vtkImageData":
    """Convert an itk.Image to a vtk.vtkImageData.

    Spacing, origin, dimensions and (with VTK >= 9) the direction matrix are
    carried over. For 3D images, vector/tensor pixel types are additionally
    registered as the point data's Vectors/Tensors.
    """
    import itk
    import vtk
    from vtk.util.numpy_support import numpy_to_vtk
    array = itk.array_view_from_image(l_image)
    vtk_image = vtk.vtkImageData()
    # Flatten to a (n_points, components) VTK data array.
    data_array = numpy_to_vtk(array.reshape(-1))
    data_array.SetNumberOfComponents(l_image.GetNumberOfComponentsPerPixel())
    data_array.SetName("Scalars")
    # Always set Scalars for (future?) multi-component volume rendering
    vtk_image.GetPointData().SetScalars(data_array)
    dim = l_image.GetImageDimension()
    # vtkImageData is always 3D; pad spacing/origin/dims for lower dimensions.
    l_spacing = [1.0] * 3
    l_spacing[:dim] = l_image.GetSpacing()
    vtk_image.SetSpacing(l_spacing)
    l_origin = [0.0] * 3
    l_origin[:dim] = l_image.GetOrigin()
    vtk_image.SetOrigin(l_origin)
    dims = [1] * 3
    dims[:dim] = itk.size(l_image)
    vtk_image.SetDimensions(dims)
    # Copy direction matrix for VTK>=9
    # NOTE(review): `vtk` is already imported above; this re-import is
    # redundant but harmless.
    import vtk
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        l_direction = l_image.GetDirection()
        direction = itk.array_from_matrix(l_direction).flatten().tolist()
        if len(direction) == 4:
            # Change 2d matrix to 3d
            direction = [
                direction[0],
                direction[1],
                0.0,
                direction[2],
                direction[3],
                0.0,
                0.0,
                0.0,
                1.0,
            ]
        vtk_image.SetDirectionMatrix(direction)
    if l_image.GetImageDimension() == 3:
        # Expose vector/tensor pixel data under the matching VTK attribute.
        PixelType = itk.template(l_image)[1][0]
        if PixelType == itk.Vector:
            vtk_image.GetPointData().SetVectors(data_array)
        elif PixelType == itk.CovariantVector:
            vtk_image.GetPointData().SetVectors(data_array)
        elif PixelType == itk.SymmetricSecondRankTensor:
            vtk_image.GetPointData().SetTensors(data_array)
        elif PixelType == itk.DiffusionTensor3D:
            vtk_image.GetPointData().SetTensors(data_array)
    return vtk_image
def image_from_vtk_image(vtk_image: "vtk.vtkImageData") -> "itkt.ImageBase":
    """Convert a vtk.vtkImageData to an itk.Image.

    Spacing, origin and (with VTK >= 9) the direction matrix are carried
    over from the VTK image's point data Scalars.
    """
    import itk
    from vtk.util.numpy_support import vtk_to_numpy
    point_data = vtk_image.GetPointData()
    array = vtk_to_numpy(point_data.GetScalars())
    array = array.reshape(-1)
    is_vector = point_data.GetScalars().GetNumberOfComponents() != 1
    dims = list(vtk_image.GetDimensions())
    if is_vector and dims[-1] == 1:
        # 2D
        dims = dims[:2]
        dims.reverse()
        dims.append(point_data.GetScalars().GetNumberOfComponents())
    else:
        # numpy index order is the reverse of VTK's x,y,z dimension order.
        dims.reverse()
    array.shape = tuple(dims)
    l_image = itk.image_view_from_array(array, is_vector)
    dim = l_image.GetImageDimension()
    l_spacing = [1.0] * dim
    l_spacing[:dim] = vtk_image.GetSpacing()[:dim]
    l_image.SetSpacing(l_spacing)
    l_origin = [0.0] * dim
    l_origin[:dim] = vtk_image.GetOrigin()[:dim]
    l_image.SetOrigin(l_origin)
    # Direction support with VTK 9
    import vtk
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        direction = vtk_image.GetDirectionMatrix()
        # NOTE(review): if dim is neither 3 nor 2, `direction_array` is
        # never bound and SetDirection below raises NameError — presumably
        # only 2D/3D images reach this path; confirm.
        if dim == 3:
            direction_array = np.identity(3)
            for y in (0, 1, 2):
                for x in (0, 1, 2):
                    direction_array[x, y] = direction.GetElement(x, y)
        elif dim == 2:
            direction_array = np.identity(2)
            for y in (0, 1):
                for x in (0, 1):
                    direction_array[x, y] = direction.GetElement(x, y)
        l_direction = itk.matrix_from_array(direction_array)
        l_image.SetDirection(l_direction)
    return l_image
# Intensity statistics helper (returns a tuple, not an image).
def image_intensity_min_max(image_or_filter: "itkt.ImageOrImageSource"):
    """Return the minimum and maximum pixel values of an image, or of the
    output image of a filter, as a tuple ``(min, max)``.

    The pipeline of the input is updated before computing the statistics.
    """
    import itk
    img = itk.output(image_or_filter)
    img.UpdateOutputInformation()
    # A full update is required here: the calculator reads the pixel buffer.
    img.Update()
    # don't put that calculator in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
    auto_pipeline.current = tmp_auto_pipeline
    comp.Compute()
    return comp.GetMinimum(), comp.GetMaximum()
# range is a python builtin and should not be overridden; the use of the
# name "range" here is kept for backward compatibility only and should be
# considered for removal in the future.
def range(image_or_filter):
    """Deprecated alias for :func:`image_intensity_min_max`; shadows the
    builtin ``range`` within this module."""
    return image_intensity_min_max(image_or_filter)
def imwrite(
    image_or_filter: "itkt.ImageOrImageSource",
    filename: fileiotype,
    compression: bool = False,
    imageio: Optional["itkt.ImageIOBase"] = None,
) -> None:
    """Write an image or the output image of a filter to a file.

    Parameters
    ----------
    image_or_filter :
        Image or filter that produces an image to write to the file.
    filename :
        Target output file path.
    compression :
        Use compression when writing if the format supports it.
    imageio :
        Use the provided itk.ImageIOBase derived instance to write the file.

    The writer is instantiated with the image type of the image in
    parameter (or, again, with the output image of the filter in parameter).
    """
    import itk
    img = itk.output(image_or_filter)
    img.UpdateOutputInformation()
    # don't put that writer in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    # NOTE(review): the FileName literal below looks mangled — upstream ITK
    # uses f"{filename}"; confirm before relying on this code path.
    writer = itk.ImageFileWriter[type(img)].New(
        Input=img, FileName=f"(unknown)", UseCompression=compression
    )
    auto_pipeline.current = tmp_auto_pipeline
    if imageio:
        writer.SetImageIO(imageio)
    writer.Update()
def imread(
    filename: fileiotype,
    pixel_type: Optional["itkt.PixelTypes"] = None,
    fallback_only: bool = False,
    imageio: Optional["itkt.ImageIOBase"] = None,
) -> "itkt.ImageBase":
    """Read an image from a file or series of files and return an itk.Image.

    Parameters
    ----------
    filename :
        File path for a single file, a list of files for an image series, or a
        directory for a DICOM image series.
    pixel_type :
        Image pixel type to cast to when loading.
    fallback_only :
        If true, first try to automatically deduce the image pixel type, and
        only use the given `pixel_type` if automatic deduction fails.
    imageio :
        Use the provided itk.ImageIOBase derived instance to read the file.

    Returns
    -------
    image :
        The resulting itk.Image.

    The reader is instantiated with the image type of the image file if
    `pixel_type` is not provided (default). The dimension of the image is
    automatically deduced from the dimension stored on disk.

    If the filename provided is a directory then the directory is assumed to
    be for a DICOM series volume. If there is exactly one DICOM series
    volume in that directory, the reader will use an itk.ImageSeriesReader
    object to read the the DICOM filenames within that directory.

    If the given filename is a list or a tuple of file names, the reader
    will use an itk.ImageSeriesReader object to read the files.

    If `fallback_only` is set to `True`, `imread()` will first try to
    automatically deduce the image pixel_type, and only use the given
    `pixel_type` if automatic deduction fails. Failures typically happen if
    the pixel type is not supported (e.g. it is not currently wrapped).

    NOTE(review): several f-string literals below look mangled
    ("(unknown)") — upstream ITK uses f"{filename}"; confirm before relying
    on these code paths.
    """
    import itk
    from itk.support.extras import TemplateTypeError
    if fallback_only:
        if pixel_type is None:
            raise Exception(
                "pixel_type must be set when using the fallback_only option"
            )
        # First attempt automatic type deduction; fall through to the
        # explicit pixel_type below only if it fails.
        try:
            return imread(filename)
        except (KeyError, TemplateTypeError):
            pass
    if type(filename) not in [list, tuple]:
        import os
        if os.path.isdir(filename):
            # read DICOM series of 1 image in a folder, refer to: https://github.com/RSIP-Vision/medio
            names_generator = itk.GDCMSeriesFileNames.New()
            names_generator.SetUseSeriesDetails(True)
            names_generator.AddSeriesRestriction("0008|0021")  # Series Date
            names_generator.SetDirectory(f"(unknown)")
            series_uid = names_generator.GetSeriesUIDs()
            if len(series_uid) == 0:
                raise FileNotFoundError(f"no DICOMs in: (unknown).")
            if len(series_uid) > 1:
                raise OSError(
                    f"the directory: (unknown) contains more than one DICOM series."
                )
            series_identifier = series_uid[0]
            # Expand the directory into an ordered list of series filenames.
            filename = names_generator.GetFileNames(series_identifier)
    if type(filename) in [list, tuple]:
        # Multiple files: read as a (possibly higher-dimensional) series.
        template_reader_type = itk.ImageSeriesReader
        io_filename = f"{filename[0]}"
        increase_dimension = True
        kwargs = {"FileNames": [f"{f}" for f in filename]}
    else:
        template_reader_type = itk.ImageFileReader
        io_filename = f"(unknown)"
        increase_dimension = False
        kwargs = {"FileName": f"(unknown)"}
    if imageio:
        kwargs["ImageIO"] = imageio
    if pixel_type:
        # Explicit pixel type: probe the file's dimension via an ImageIO,
        # then instantiate a concretely-typed reader.
        image_IO = itk.ImageIOFactory.CreateImageIO(
            io_filename, itk.CommonEnums.IOFileMode_ReadMode
        )
        if not image_IO:
            raise RuntimeError("No ImageIO is registered to handle the given file.")
        image_IO.SetFileName(io_filename)
        image_IO.ReadImageInformation()
        dimension = image_IO.GetNumberOfDimensions()
        # Increase dimension if last dimension is not of size one.
        if increase_dimension and image_IO.GetDimensions(dimension - 1) != 1:
            dimension += 1
        # VariableLengthVector pixel types map to VectorImage, not Image.
        is_vlv = False
        try:
            is_vlv = itk.template(pixel_type)[0] is itk.VariableLengthVector
        except KeyError:
            pass
        if is_vlv:
            ImageType = itk.VectorImage[itk.template(pixel_type)[1][0], dimension]
        else:
            ImageType = itk.Image[pixel_type, dimension]
        reader = template_reader_type[ImageType].New(**kwargs)
    else:
        # No pixel type given: let ITK deduce the instantiation from the file.
        reader = template_reader_type.New(**kwargs)
    reader.Update()
    return reader.GetOutput()
def meshwrite(
    mesh: "itkt.Mesh", filename: fileiotype, compression: bool = False
) -> None:
    """Write a mesh to a file.

    The writer is instantiated according to the type of the input mesh.
    ``compression`` enables compression when the file format supports it.
    """
    import itk
    mesh.UpdateOutputInformation()
    # don't put that writer in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    # NOTE(review): the FileName literal below looks mangled — upstream ITK
    # uses f"{filename}"; confirm before relying on this code path.
    writer = itk.MeshFileWriter[type(mesh)].New(
        Input=mesh, FileName=f"(unknown)", UseCompression=compression
    )
    auto_pipeline.current = tmp_auto_pipeline
    writer.Update()
def meshread(
    filename: fileiotype,
    pixel_type: Optional["itkt.PixelTypes"] = None,
    fallback_only: bool = False,
) -> "itkt.Mesh":
    """Read a mesh from a file and return an itk.Mesh.

    The reader is instantiated with the mesh type of the mesh file if
    `pixel_type` is not provided (default). The dimension of the mesh is
    automatically found.

    If `fallback_only` is set to `True`, `meshread()` will first try to
    automatically deduce the image pixel_type, and only use the given
    `pixel_type` if automatic deduction fails. Failures typically
    happen if the pixel type is not supported (e.g. it is not currently
    wrapped).

    NOTE(review): the f-string literals below look mangled ("(unknown)") —
    upstream ITK uses f"{filename}"; confirm before relying on this code.
    """
    import itk
    if fallback_only:
        if pixel_type is None:
            raise Exception(
                "pixel_type must be set when using the fallback_only option"
            )
        # First attempt automatic deduction; fall through on failure.
        try:
            return meshread(filename)
        except (KeyError, itk.TemplateTypeError):
            pass
    TemplateReaderType = itk.MeshFileReader
    io_filename = f"(unknown)"
    # NOTE(review): increase_dimension is always False here, so the
    # "Increase dimension" branch below is dead code (mirrors imread()).
    increase_dimension = False
    kwargs = {"FileName": f"(unknown)"}
    if pixel_type:
        # Explicit pixel type: probe the file's point dimension via a MeshIO,
        # then instantiate a concretely-typed reader.
        meshIO = itk.MeshIOFactory.CreateMeshIO(
            io_filename, itk.CommonEnums.IOFileMode_ReadMode
        )
        if not meshIO:
            raise RuntimeError("No MeshIO is registered to handle the given file.")
        meshIO.SetFileName(io_filename)
        meshIO.ReadMeshInformation()
        dimension = meshIO.GetPointDimension()
        # Increase dimension if last dimension is not of size one.
        if increase_dimension and meshIO.GetDimensions(dimension - 1) != 1:
            dimension += 1
        MeshType = itk.Mesh[pixel_type, dimension]
        reader = TemplateReaderType[MeshType].New(**kwargs)
    else:
        # No pixel type given: let ITK deduce the instantiation from the file.
        reader = TemplateReaderType.New(**kwargs)
    reader.Update()
    return reader.GetOutput()
def transformread(filename: fileiotype) -> List["itkt.TransformBase"]:
    """Read an itk Transform file.

    Parameters
    ----------
    filename:
        Path to the transform file (typically a .h5 file).

    Returns
    -------
    A Python list containing the transforms in the file.

    NOTE(review): the FileName literal below looks mangled ("(unknown)") —
    upstream ITK uses f"{filename}"; confirm before relying on this code.
    """
    import itk
    reader = itk.TransformFileReaderTemplate[itk.D].New()
    reader.SetFileName(f"(unknown)")
    reader.Update()
    transforms = []
    transform_list = reader.GetModifiableTransformList()
    # Pop the transforms off the C++ list, down-casting each to its most
    # derived wrapped type; reverse at the end to restore file order.
    while not transform_list.empty():
        transform = transform_list.pop()
        transforms.append(itk.down_cast(transform))
    transforms.reverse()
    return transforms
def transformwrite(
    transforms: List["itkt.TransformBase"],
    filename: fileiotype,
    compression: bool = False,
) -> None:
    """Write an itk Transform file.

    Parameters
    ----------
    transforms: list of itk.TransformBaseTemplate[itk.D]
        Python list of the transforms to write.
    filename:
        Path to the transform file (typically a .h5 file).
    compression:
        Use compression, if the file format supports it.

    NOTE(review): the FileName literal below looks mangled ("(unknown)") —
    upstream ITK uses f"{filename}"; confirm before relying on this code.
    """
    import itk
    writer = itk.TransformFileWriterTemplate[itk.D].New()
    writer.SetFileName(f"(unknown)")
    writer.SetUseCompression(compression)
    for transform in transforms:
        writer.AddTransform(transform)
    writer.Update()
def search(s: str, case_sensitive: bool = False) -> List[str]:
    """Search for a class name in the itk module.

    Spaces are stripped from the query. Exact matches are listed first,
    followed by names that merely contain the query as a substring. When
    *case_sensitive* is false, matching is done on lower-cased names.
    """
    s = s.replace(" ", "")
    if not case_sensitive:
        s = s.lower()
    import itk

    names = sorted(dir(itk))

    def key(name: str) -> str:
        # Normalize a candidate name according to the case sensitivity mode.
        return name if case_sensitive else name.lower()

    # Exact matches first, then substring-only matches.
    hits = [n for n in names if key(n) == s]
    hits += [n for n in names if s in key(n) and key(n) != s]
    return hits
def _snake_to_camel(keyword: str):
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile("(_)([a-z0-9A-Z])")
def _underscore_upper(match_obj):
return match_obj.group(2).upper()
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
def set_inputs(
    new_itk_object,
    inargs: Optional[Sequence[Any]] = None,
    inkargs: Optional[Dict[str, Any]] = None,
):
    """Set the inputs of the given object, according to the non-named and the
    named parameters in inargs and inkargs.

    This function tries to assign all the non-named parameters as inputs of
    new_itk_object — the first non-named parameter as the first input, etc.

    The named parameters are used by calling the method with the same name
    prefixed by 'Set'.
    set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)

    This is the function used in the enhanced New() method to manage the
    inputs. It can be used to produce a similar behavior:

    def SetInputs(self, *args, **kargs):
        import itk
        itk.set_inputs(self, *args, **kargs)
    """
    # Avoid the mutable-default-argument pitfall by using None sentinels.
    # https://docs.python-guide.org/writing/gotchas/
    args: List[Any] = inargs if inargs else []
    kargs: Dict[str, Any] = inkargs if inkargs else {}
    # try to get the images from the filters in args
    args = [output(arg) for arg in args]
    # args without name are filters used to set input images
    #
    # count SetInput calls to call SetInput, SetInput2, SetInput3, ...
    # useful with filters which take 2 inputs (or more) like SubtractImageFilter
    # Ex: subtract image2.png from image1.png and save the result in result.png
    # r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
    # r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
    # s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
    # itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
    setInputNb: int = -1
    try:
        for setInputNb, arg in enumerate(args):
            methodName = "SetInput%i" % (setInputNb + 1)
            if methodName in dir(new_itk_object):
                # first try to use methods called SetInput1, SetInput2, ...
                # those methods should have more chances to work in case of
                # multiple input types
                getattr(new_itk_object, methodName)(arg)
            else:
                # no method called SetInputN?
                # try with the standard SetInput(nb, input)
                new_itk_object.SetInput(setInputNb, arg)
    except TypeError as e:
        # the exception has (at least) two possible causes:
        # + the filter doesn't take the input number as first argument
        # + arg is an object of the wrong type
        #
        # if it's not the first input, re-raise the exception
        if setInputNb != 0:
            raise e
        # it's the first input, try to use the SetInput() method without input
        # number
        new_itk_object.SetInput(args[0])
        # but raise an exception if there is more than 1 argument
        if len(args) > 1:
            raise TypeError("Object accepts only 1 input.")
    except AttributeError:
        # There is no SetInput() method, try SetImage
        # but before, check the number of inputs
        if len(args) > 1:
            raise TypeError("Object accepts only 1 input.")
        methodList = ["SetImage", "SetInputImage"]
        methodName = None
        for m in methodList:
            if m in dir(new_itk_object):
                methodName = m
        if methodName:
            getattr(new_itk_object, methodName)(args[0])
        else:
            raise AttributeError("No method found to set the input.")
    # named args : name is the function name, value is argument(s)
    for attribName, value in kargs.items():
        # use Set as prefix. It allows a shorter and more intuitive
        # call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
        # with the full name
        # (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
        if attribName not in ["auto_progress", "template_parameters"]:
            # snake_case keyword arguments are translated to CamelCase.
            if attribName.islower():
                attribName = _snake_to_camel(attribName)
            attrib = getattr(new_itk_object, "Set" + attribName)
            # Do not use try-except mechanism as this leads to
            # segfaults. Instead limit the number of types that are
            # tested. The list of tested types could maybe be replaced by
            # a test that would check for iterables.
            import itk
            if type(value) in [list, tuple]:
                # First try to unpack the sequence as separate arguments;
                # fall back to passing the sequence itself.
                try:
                    output_value = [itk.output(x) for x in value]
                    attrib(*output_value)
                except Exception:
                    attrib(itk.output(value))
            else:
                attrib(itk.output(value))
class templated_class:
    """This class is used to mimic the behavior of the templated C++ classes.

    It is used this way:

        class CustomClass:
            # class definition here
        CustomClass = templated_class(CustomClass)
        customObject = CustomClass[template, parameters].New()

    The template parameters are passed to the custom class constructor as a
    named parameter 'template_parameters' in a tuple.

    The custom class may implement a static method
    check_template_parameters(parameters) which should raise an exception if
    the template parameters provided are not suitable to instantiate the
    custom class.
    """

    def __init__(self, cls) -> None:
        """cls is the custom class"""
        self.__cls__ = cls
        # maps a template-parameter tuple to its ready-to-instantiate pair
        # (populated by add_template())
        self.__templates__ = {}

    def New(self, *args, **kargs):
        """Use the parameters to infer the types of the template parameters."""
        # extract the types from the arguments to instantiate the class
        # (the previously present `import itk` here was unused and removed)
        types = tuple(class_(o) for o in args)
        return self[types].New(*args, **kargs)

    def __getitem__(self, template_parameters):
        """Return a pair class-template parameters ready to be instantiated.

        The template parameters may be validated if the custom class provide
        the static method check_template_parameters(parameters).
        """
        if not isinstance(template_parameters, tuple):
            template_parameters = (template_parameters,)
        return templated_class.__templated_class_and_parameters__(
            self, template_parameters
        )

    def check_template_parameters(self, template_parameters) -> None:
        """Check the template parameters passed in parameter."""
        # this method is there mainly to make possible to reuse it in the
        # custom class constructor after having used templated_class().
        # Without that, the following example doesn't work:
        #
        # class CustomClass:
        #     def __init__(self, *args, **kargs):
        #         template_parameters = kargs["template_parameters"]
        #         CustomClass.check_template_parameters(template_parameters)
        #         # other init stuff
        #     def check_template_parameters(template_parameters):
        #         # check, really
        #         pass
        # CustomClass = templated_class(CustomClass)
        #
        self.__cls__.check_template_parameters(template_parameters)

    def add_template(self, name: str, params):
        """Register an instantiation for `params` and expose it as attribute
        `name` on this template object."""
        if not isinstance(params, list) and not isinstance(params, tuple):
            params = (params,)
        params = tuple(params)
        val = self[params]
        self.__templates__[params] = val
        setattr(self, name, val)

    def add_image_templates(self, *args) -> None:
        """Register instantiations for every combination of the given pixel
        types, over all image dimensions in itk.DIMS."""
        import itk

        if not args:
            return
        # build the cartesian product of the given type lists
        combinations = [[t] for t in args[0]]
        for types in args[1:]:
            temp = []
            for t in types:
                for c in combinations:
                    temp.append(c + [t])
            combinations = temp
        for d in itk.DIMS:
            for c in combinations:
                parameters = []
                name = ""
                for t in c:
                    parameters.append(itk.Image[t, d])
                    name += "I" + t.short_name + str(d)
                self.add_template(name, tuple(parameters))

    class __templated_class_and_parameters__:
        """Inner class used to store the pair class-template parameters ready
        to instantiate.
        """

        def __init__(self, l_templated_class, l_template_parameters) -> None:
            self.__templated_class__ = l_templated_class
            self.__template_parameters__ = l_template_parameters
            if "check_template_parameters" in dir(l_templated_class.__cls__):
                l_templated_class.__cls__.check_template_parameters(
                    l_template_parameters
                )

        def New(self, *args, **kargs):
            """A New() method to mimic the ITK default behavior, even if the
            class doesn't provide any New() method.
            """
            kargs["template_parameters"] = self.__template_parameters__
            if "New" in dir(self.__templated_class__.__cls__):
                obj = self.__templated_class__.__cls__.New(*args, **kargs)
            else:
                obj = self.__templated_class__.__cls__(*args, **kargs)
            setattr(obj, "__template_parameters__", self.__template_parameters__)
            setattr(obj, "__templated_class__", self.__templated_class__)
            return obj

        def __call__(self, *args, **kargs):
            return self.New(*args, **kargs)

    def keys(self):
        return self.__templates__.keys()

    def values(self):
        return list(self.__templates__.values())

    def items(self):
        return list(self.__templates__.items())

    # everything after this comment is for dict interface
    # and is a copy/paste from DictMixin
    # only methods to edit dictionary are not there
    def __iter__(self):
        # BUG FIX: the return annotation used to be `-> str`; this is a
        # generator over the template-parameter keys.
        for k in self.keys():
            yield k

    def has_key(self, key: str):
        return key in self.__templates__

    def __contains__(self, key: str):
        # BUG FIX: this used to be `return key in self`, which re-invokes
        # __contains__ and recursed until RecursionError.
        return key in self.__templates__

    def get(self, key, default=None):
        # BUG FIX: this used to be `return self.get(key, default)`, an
        # unconditional infinite recursion; delegate to the backing dict.
        return self.__templates__.get(key, default)

    def __len__(self):
        return len(self.keys())
class pipeline:
    """A convenient class to store the reference to the filters of a pipeline.

    With this class, a method can create a pipeline of several filters and
    return it without losing the references to the filters in this pipeline.
    The pipeline object acts almost like a filter (it has a GetOutput()
    method) and thus can be simply integrated in another pipeline.
    """

    def __init__(self, *args, **kargs) -> None:
        self.clear()
        self.input = None
        self.filters: List[Any] = []
        set_inputs(self, args, kargs)

    def connect(self, l_filter) -> None:
        """Connect a new l_filter to the pipeline.

        The output of the first l_filter will be used as the input of this
        one and the l_filter passed as parameter will be added to the list.
        """
        if self.GetOutput() is not None:
            set_inputs(l_filter, [self.GetOutput()])
        self.append(l_filter)

    def append(self, l_filter) -> None:
        """Add a new l_filter to the pipeline.

        The new l_filter will not be connected. The user must connect it.
        """
        self.filters.append(l_filter)

    def clear(self) -> None:
        """Clear the filter list"""
        self.filters = []

    def GetOutput(self, l_index: int = 0):
        """Return the output of the pipeline.

        If another output is needed, use
        pipeline.filters[-1].GetAnotherOutput() instead of this method,
        subclass pipeline to implement another GetOutput() method, or use
        expose().
        """
        if len(self.filters) == 0:
            # an empty pipeline just passes its input through
            return self.GetInput()
        else:
            l_filter = self.filters[-1]
            if hasattr(l_filter, "__getitem__"):
                return l_filter[l_index]
            try:
                return l_filter.GetOutput(l_index)
            except Exception:
                if l_index == 0:
                    return l_filter.GetOutput()
                else:
                    raise ValueError("Index can only be 0 on that object")

    def GetNumberOfOutputs(self) -> int:
        """Return the number of outputs"""
        if len(self.filters) == 0:
            return 1
        else:
            return self.filters[-1].GetNumberOfOutputs()

    def SetInput(self, l_input) -> None:
        """Set the input of the pipeline"""
        if len(self.filters) != 0:
            set_inputs(self.filters[0], [l_input])
        # BUG FIX: this used to assign to `self.l_input`, an attribute that
        # GetInput() never reads, so the input of an empty pipeline was lost.
        self.input = l_input

    def GetInput(self):
        """Get the input of the pipeline"""
        return self.input

    def Update(self):
        """Update the pipeline"""
        if len(self.filters) > 0:
            return self.filters[-1].Update()

    def UpdateLargestPossibleRegion(self):
        """Update the pipeline"""
        if len(self.filters) > 0:
            return self.filters[-1].UpdateLargestPossibleRegion()

    def UpdateOutputInformation(self) -> None:
        """Update the output information of the last filter, falling back to
        a full Update() when that method is not available."""
        # Guard against an empty pipeline: `self.filters[-1]` used to raise
        # IndexError here; Update() is a safe no-op in that case.
        if self.filters and "UpdateOutputInformation" in dir(self.filters[-1]):
            self.filters[-1].UpdateOutputInformation()
        else:
            self.Update()

    def __len__(self):
        return self.GetNumberOfOutputs()

    def __getitem__(self, item):
        return self.GetOutput(item)

    def __call__(self, *args, **kargs):
        set_inputs(self, args, kargs)
        self.UpdateLargestPossibleRegion()
        return self

    def expose(self, name: str, new_name: Optional[str] = None, position: int = -1):
        """Expose an attribute from a filter of the mini-pipeline.

        Once called, the pipeline instance has a new Set/Get set of methods
        to access directly the corresponding method of one of the filter of
        the pipeline.
        Ex: p.expose("Radius")
            p.SetRadius(5)
            p.GetRadius()

        By default, the attribute usable on the pipeline instance has the
        same name than the one of the filter, but it can be changed by
        providing a value to new_name.
        The last filter of the pipeline is used by default, but another one
        may be used by giving its position.
        Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
            p.GetSmoothingNeighborhood()
        """
        if new_name is None:
            new_name = name
        src = self.filters[position]
        ok: bool = False
        set_name: str = "Set" + name
        if set_name in dir(src):
            setattr(self, "Set" + new_name, getattr(src, set_name))
            ok = True
        get_name = "Get" + name
        if get_name in dir(src):
            setattr(self, "Get" + new_name, getattr(src, get_name))
            ok = True
        if not ok:
            raise RuntimeError(f"No attribute {name} at position {position}.")
class auto_pipeline(pipeline):
    """A pipeline that registers itself as the currently-active pipeline
    (auto_pipeline.current) as soon as it is constructed."""

    # class-level handle to the active pipeline, None when stopped
    current = None

    def __init__(self, *args, **kargs) -> None:
        super().__init__(*args, **kargs)
        self.Start()

    def Start(self) -> None:
        """Make this instance the active pipeline."""
        auto_pipeline.current = self

    @staticmethod
    def Stop() -> None:
        """Deactivate the active pipeline."""
        auto_pipeline.current = None
def down_cast(obj: "itkt.LightObject"):
    """Down cast an itk.LightObject (or a object of a subclass) to its most
    specialized type.
    """
    import itk
    from itk.support.template_class import itkTemplate

    class_name: str = obj.GetNameOfClass()
    t = getattr(itk, class_name)
    if not isinstance(t, itkTemplate):
        # non-templated type: a single cast is enough
        return t.cast(obj)
    # templated type: try every instantiation until one accepts the cast
    for candidate in t.values():
        try:
            return candidate.cast(obj)
        except Exception:
            # this instantiation does not match; fail silently and try the next
            continue
    raise RuntimeError(f"Can't downcast to a specialization of {class_name}")
def attribute_list(inputobject, name: str):
    """Returns a list of the specified attributes for the objects in the image.

    inputobject: the input LabelImage (or a filter producing one)
    name: the attribute name
    """
    import itk

    img = itk.output(inputobject)
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=name, ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    l_list: List[Any] = []
    # required because range is overloaded in this module
    # (the previously present `import sys` was unused and removed, matching
    # the sibling attributes_list())
    from builtins import range

    for i in range(1, r.GetNumberOfLabelObjects() + 1):
        l_list.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
    return l_list
def attributes_list(inputObject, names: List[str]):
    """Returns a list of tuples with the specified attributes for the objects
    in the image.

    inputObject: the input LabelImage (or a filter producing one)
    names: the attribute names
    """
    import itk

    img = itk.output(inputObject)
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=names[0], ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    labeled = relabel.GetOutput()
    # required because range is overloaded in this module
    from builtins import range

    return [
        tuple(
            getattr(labeled.GetLabelObject(label), "Get" + attr_name)()
            for attr_name in names
        )
        for label in range(1, labeled.GetNumberOfLabelObjects() + 1)
    ]
def attribute_dict(inputobject, name: str):
    """Returns a dict with the attribute values in keys and a list of the
    corresponding label objects in value.

    inputobject: the input LabelImage (or a filter producing one)
    name: the name of the attribute
    """
    import itk

    img = itk.output(inputobject)
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=name, ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    d = {}
    # required because range is overloaded in this module
    from builtins import range

    for i in range(1, r.GetNumberOfLabelObjects() + 1):
        lo = r.GetLabelObject(i)
        v = lo.__getattribute__("Get" + name)()
        # setdefault replaces the manual get/append/store round-trip
        d.setdefault(v, []).append(lo)
    return d
def number_of_objects(image_or_filter) -> int:
    """Return the number of objects in the input LabelImage."""
    import itk

    # make sure the upstream pipeline has executed before reading the count
    image_or_filter.UpdateLargestPossibleRegion()
    return itk.output(image_or_filter).GetNumberOfLabelObjects()
def ipython_kw_matches(text: str):
    """Match named ITK object's named parameters.

    IPython completer hook: returns a list of "Name=" candidates for the
    callable owning the open parenthesis at the cursor.
    """
    import IPython
    import itk
    import re
    import inspect
    from itk.support import template_class

    regexp = re.compile(
        r"""
        '.*?' | # single quoted strings or
        ".*?" | # double quoted strings or
        \w+ | # identifier
        \S # other characters
        """,
        re.VERBOSE | re.DOTALL,
    )
    ip = IPython.get_ipython()

    if "." in text:  # a parameter cannot be dotted
        return []
    # 1. Find the nearest identifier that comes before an unclosed
    # parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
    if ip.Completer.readline:
        text_until_cursor = ip.Completer.readline.get_line_buffer()[
            : ip.Completer.readline.get_endidx()
        ]
    else:
        # IPython >= 5.0.0, which is based on the Python Prompt Toolkit
        text_until_cursor = ip.Completer.text_until_cursor

    tokens = regexp.findall(text_until_cursor)
    tokens.reverse()
    iter_tokens = iter(tokens)
    open_par = 0
    for token in iter_tokens:
        if token == ")":
            open_par -= 1
        elif token == "(":
            open_par += 1
            if open_par > 0:
                # found the last unclosed parenthesis
                break
    else:
        return []
    # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
    ids = []
    is_id = re.compile(r"\w+$").match
    while True:
        try:
            # BUG FIX: this used to call `iter_tokens.next()`, the Python 2
            # iterator protocol, which raises AttributeError on Python 3.
            ids.append(next(iter_tokens))
            if not is_id(ids[-1]):
                ids.pop()
                break
            if not next(iter_tokens) == ".":
                break
        except StopIteration:
            break
    # lookup the candidate callable matches either using global_matches
    # or attr_matches for dotted names
    if len(ids) == 1:
        callable_matches = ip.Completer.global_matches(ids[0])
    else:
        callable_matches = ip.Completer.attr_matches(".".join(ids[::-1]))
    arg_matches = []
    for callable_match in callable_matches:
        # drop the .New at this end, so we can search in the class members
        if callable_match.endswith(".New"):
            callable_match = callable_match[:-4]
        elif not re.findall("([A-Z])", callable_match):  # True if snake case
            # Split at the last '.' occurrence
            split_name_parts = callable_match.split(".")
            namespace = split_name_parts[:-1]
            function_name = split_name_parts[-1]
            # Find corresponding object name
            object_name = _snake_to_camel(function_name)
            # Check that this object actually exists
            try:
                object_callable_match = ".".join(namespace + [object_name])
                eval(object_callable_match, ip.Completer.namespace)
                # Reconstruct full object name
                callable_match = object_callable_match
            except AttributeError:
                # callable_match is not a snake case function with a
                # corresponding object.
                pass
        try:
            l_object = eval(callable_match, ip.Completer.namespace)
            if isinstance(l_object, template_class.itkTemplate):
                # this is a template - lets grab the first entry to search for
                # the methods
                l_object = l_object.values()[0]
            named_args = []
            is_in: bool = isinstance(l_object, itk.LightObject)
            if is_in or (
                inspect.isclass(l_object) and issubclass(l_object, itk.LightObject)
            ):
                # every SetXxx method exposes a named argument "Xxx"
                named_args = [n[3:] for n in dir(l_object) if n.startswith("Set")]
        except Exception as e:
            print(e)
            continue
        for namedArg in named_args:
            if namedArg.startswith(text):
                arg_matches.append(f"{namedArg}=")
    return arg_matches
def template(cl):
    """Return the template of a class (or of the class of an object) and
    its parameters.

    template() returns a tuple with 2 elements:
        - the first one is the itkTemplate object
        - the second is a tuple containing the template parameters
    """
    from itk.support.template_class import itkTemplateBase

    # the registry maps instantiated classes to (template, parameters) pairs;
    # raises KeyError for classes that are not template instantiations
    registry = itkTemplateBase.__template_instantiations_object_to_name__
    return registry[class_(cl)]
def ctype(s: str) -> "itkt.itkCType":
    """Return the c type corresponding to the string passed in parameter.

    The string can contain some extra spaces.
    see also itkCType
    """
    from itk.support.types import itkCType

    # collapse any run of whitespace to a single space before the lookup
    normalized = " ".join(s.split())
    c_type = itkCType.GetCType(normalized)
    if c_type is None:
        raise KeyError(f"Unrecognized C type '{s}'")
    return c_type
def class_(obj):
    """Return a class from an object.

    Often in itk, the __class__ is not what the user is expecting.
    class_() should do a better job.
    """
    import inspect

    # instances yield their class; classes are returned unchanged
    if not inspect.isclass(obj):
        return obj.__class__
    return obj
def python_type(object_ref) -> str:
    """Returns the Python type name of an object

    The Python name corresponding to the given instantiated object is printed.
    This includes both the Python name and the parameters of the object. A user
    can copy and paste the printed value to instantiate a new object of the
    same type."""
    from itk.support.template_class import itkTemplate
    from itk.support.types import itkCType

    def in_itk(name):
        # Return "itk.<shortname>" when `name` refers to an attribute of the
        # itk module, otherwise return `name` unchanged.
        import itk

        # Remove "itk::" and "std::" from template name.
        # Only happens for ITK objects.
        shortname: str = name.split("::")[-1]
        shortname = shortname.split("itk")[-1]

        namespace = itk
        # A type cannot be part of ITK if its name was not modified above. This
        # check avoids having an input of type `list` and return `itk.list` that
        # also exists.
        likely_itk: bool = shortname != name or name[:3] == "vnl"
        if likely_itk and hasattr(namespace, shortname):
            return namespace.__name__ + "." + shortname  # Prepend name with 'itk.'
        else:
            return name

    def recursive(l_obj, level: int):
        # Render l_obj as a string, recursing into template parameters.
        # template() raises KeyError when l_obj is not a registered template
        # instantiation, which routes non-templated objects to the except arm.
        try:
            type_name, param_list = template(l_obj)
            name = in_itk(type_name.__name__)
            parameters = []
            for t in param_list:
                parameters.append(recursive(t, level + 1))
            return name + "[" + ",".join(parameters) + "]"
        except KeyError:
            if isinstance(l_obj, itkCType):  # Handles CTypes differently
                return "itk." + l_obj.short_name
            elif hasattr(l_obj, "__name__"):
                # This should be where most ITK types end up.
                return in_itk(l_obj.__name__)
            elif (
                not isinstance(l_obj, type)
                and type(l_obj) != itkTemplate
                and level != 0
            ):
                # l_obj should actually be considered a value, not a type,
                # or it is already an itkTemplate type.
                # A value can be an integer that is a template parameter.
                # This does not happen at the first level of the recursion
                # as it is not possible that this object would be a template
                # parameter. Checking the level `0` allows e.g. to find the
                # type of an object that is a `list` or an `int`.
                return str(l_obj)
            else:
                return in_itk(type(l_obj).__name__)

    return recursive(object_ref, 0)
class TemplateTypeError(TypeError):
    """TypeError raised when an itkTemplate is instantiated with an input
    type it is not wrapped for. The message lists the supported types."""

    def __init__(self, template_type, input_type):
        def tuple_to_string_type(t):
            # Render a tuple of types as "T1, T2" and a single type as "T1".
            if type(t) == tuple:
                return ", ".join(python_type(x) for x in t)
            else:
                # BUG FIX: the result was computed but not returned, so
                # single (non-tuple) input types rendered as "None" in the
                # error message.
                return python_type(t)

        import itk

        # Special case for ITK readers: Add extra information.
        extra_eg: str = ""
        if template_type in [
            itk.ImageFileReader,
            itk.ImageSeriesReader,
            itk.MeshFileReader,
        ]:
            extra_eg = """
or
    e.g.: image = itk.imread(my_input_filename, itk.F)
"""

        python_template_type = python_type(template_type)
        python_input_type = tuple_to_string_type(input_type)
        type_list = "\n".join([python_type(x[0]) for x in template_type.keys()])
        eg_type = ", ".join([python_type(x) for x in list(template_type.keys())[0]])
        msg: str = """{template_type} is not wrapped for input type `{input_type}`.

To limit the size of the package, only a limited number of
types are available in ITK Python. To print the supported
types, run the following command in your python environment:

    {template_type}.GetTypes()

Possible solutions:
* If you are an application user:
** Convert your input image into a supported format (see below).
** Contact developer to report the issue.
* If you are an application developer, force input images to be
loaded in a supported pixel type.

    e.g.: instance = {template_type}[{eg_type}].New(my_input){extra_eg}

* (Advanced) If you are an application developer, build ITK Python yourself and
turned to `ON` the corresponding CMake option to wrap the pixel type or image
dimension you need. When configuring ITK with CMake, you can set
`ITK_WRAP_${{type}}` (replace ${{type}} with appropriate pixel type such as
`double`). If you need to support images with 4 or 5 dimensions, you can add
these dimensions to the list of dimensions in the CMake variable
`ITK_WRAP_IMAGE_DIMS`.

Supported input types:

{type_list}
""".format(
            template_type=python_template_type,
            input_type=python_input_type,
            type_list=type_list,
            eg_type=eg_type,
            extra_eg=extra_eg,
        )
        TypeError.__init__(self, msg)
# install progress callback and custom completer if we are in ipython
# interpreter
try:
    import itkConfig
    import IPython

    # Register ipython_kw_matches as the highest-priority completer so that
    # named parameters (SetXxx -> Xxx=) are offered inside call parentheses.
    if IPython.get_ipython():
        IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)
    # some cleanup: keep the module namespace free of these helper imports
    del itkConfig, IPython
except (ImportError, AttributeError):
    # fail silently: not running under IPython, or completer API unavailable
    pass
|
vfonov/ITK
|
Wrapping/Generators/Python/itk/support/extras.py
|
Python
|
apache-2.0
| 66,365
|
[
"VTK"
] |
3021de329b0492e8cb3003eb0c0272ebdb96b999225c9df63bdee774fa972ee6
|
# Encoding: utf-8
import json
import math
import numpy as np
import pymatgen as pmg
import cage.utils as utils
import pymatgen.symmetry.analyzer as syman
from itertools import combinations
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.structure import Molecule
from pymatgen.core.structure import SiteCollection
from pymatgen.core.operations import SymmOp
"""
Core tools of the cage package. Defines the Cage, OccupiedCage and Facet class.
"""
__author__ = "Marnik Bercx"
__version__ = "0.1"
__maintainer__ = "Marnik Bercx"
__email__ = "marnik.bercx@uantwerpen.be"
__status__ = "alpha"
__date__ = "14 JUN 2017"
SYMMETRY_TOLERANCE = 1e-2
# This is a tolerance value to determine the symmetry operations of the Cage.
# It is also used to determine which facets are equivalent. The standard value
# of 1E-2 is usually pretty good. In case the right non-equivalent facets are
not found, it might be worth trying to tweak this value.
ANGLE_TOLERANCE = math.pi / 20
# This value is important when determining whether or not a new site is a part
# of the facet. In principle, the site should be in the plane of the facet,
# i.e. the angle between the line connecting the center of the facet and the
# site and the normal of the Facet should be pi/2. This parameters allows a
# deviation of the angle up to ANGLE_TOLERANCE
class Cage(Molecule):
"""
A Cage is a pymatgen.Molecule-based object for molecules shaped similar
to fullerenes.
"""
def __init__(self, species, coords, charge=0, spin_multiplicity=None,
validate_proximity=False, site_properties=None):
"""
Create a Cage instance. The Cage molecule's geometric center is
automatically centered on the origin.
Args:
species (List of pymatgen.Specie): List of atomic species. Possible
kinds of input include a list of dict of elements/species and
occupancies, a List of elements/specie specified as actual
Element/Specie, Strings ("Fe", "Fe2+") or atomic numbers
(1,56).
coords (List of (3,) numpy.ndarray): List of cartesian coordinates
of each species.
charge (float): Charge for the molecule. Defaults to 0.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
Returns:
(*cage.Cage*)
"""
super(Cage, self).__init__(species, coords, charge, spin_multiplicity,
validate_proximity, site_properties)
self.center()
self._facets = None
self._pointgroup = None
self._symmops = None
self._facet_dict = None
@property
def facets(self):
"""
Surface Facets of the Cage. Note that in case the surface facets have
not been set up using *find.surface.facets()*, the property is equal to
*None*.
Returns:
(List of Facets): The surface facets of the Cage, as set up using
find_surface_facets()
"""
return self._facets
@property
def pointgroup(self):
"""
The Schoenflies PointGroup of the Cage molecule.
Returns:
(*pymatgen.symmetry.analyzer.PointGroup*)
"""
if not self._pointgroup:
self._pointgroup = syman.PointGroupAnalyzer(self).get_pointgroup()
return self._pointgroup
@property
def symmops(self):
"""
The symmetry operations of the Cage.
Returns:
(*List of pymatgen.Symmop*)
"""
if not self._symmops:
# Set up the point group analyzer
pgan = syman.PointGroupAnalyzer(self)
# Find the full set of symmetry operations
self._symmops = syman.generate_full_symmops(pgan.symmops,
SYMMETRY_TOLERANCE)
return self._symmops
@property
def anion_center(self):
anion_coords = [site.coords for site in self.sites
if site.specie not in OccupiedCage.CATIONS]
return sum(anion_coords) / len(anion_coords)
@classmethod
def from_poscar(cls, filename):
"""
Imports a Cage from a VASP POSCAR file.
Args:
filename (string): Filename of the POSCAR file.
Returns:
(*cage.Cage*)
"""
# Import the structure from the POSCAR file
structure = pmg.Structure.from_file(filename)
# Generate the molecule object from the structure sites
cage = cls(structure.species, structure.cart_coords)
return cage
@classmethod
def from_molecule(cls, mol):
"""
Initializes a Cage from a Molecule.
Args:
mol (pymatgen.Molecule): The molecule from which to initialize the
cage.
Returns:
(*cage.Cage*)
"""
assert type(mol) is Molecule or type(mol) is Cage
return cls(species=mol.species, coords=mol.cart_coords,
charge=mol.charge, spin_multiplicity=mol.spin_multiplicity,
site_properties=mol.site_properties)
    def to_poscar(self, filename='POSCAR'):
        """
        Writes the Cage to a POSCAR file.

        Args:
            filename (str): Path of the POSCAR file to write. Defaults to
                'POSCAR'.
        """
        pass  # TODO Not implemented yet; intended counterpart of from_poscar.
def copy(self):
"""
Convenience method to get a copy of the Cage. Overwritten from the
Molecule class to conserve the charge of the molecule.
Returns:
cage.core.Cage
"""
copy = super(Cage, self).copy()
copy.set_charge_and_spin(charge=self.charge)
return copy
def center(self, point=None):
"""
Center the Cage around a point by updating the sites, i.e. find the
coordinates for the sites so that the geometric center is on the point
provided. In case no point is provided, the molecule is centered around
the origin.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
point ((3,) numpy.ndarray): Point around which to center the
molecule.
"""
center = sum([site.coords for site in self.sites]) / len(self.sites)
if point is not None:
center -= point
# Find the new coordinates
new_coords = np.array(self.cart_coords) - center
# Update the sites
sites = []
for i in range(len(self.species)):
prop = None
if self.site_properties:
prop = {k: v[i] for k, v in self.site_properties.items()}
sites.append(pmg.Site(self.species[i], new_coords[i],
properties=prop))
self._sites = sites
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def redefine_origin(self, origin):
"""
Change the coordinates of the Cage, in order to redefine the origin.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
origin ((3,) numpy.ndarray): Origin coordinates.
"""
# Find the new coordinates
new_coords = np.array(self.cart_coords) - origin
# Update the sites
sites = []
for i in range(len(self.species)):
prop = None
if self.site_properties:
prop = {k: v[i] for k, v in self.site_properties.items()}
sites.append(pmg.Site(self.species[i], new_coords[i],
properties=prop))
self._sites = sites
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def insert(self, index, species, coords, validate_proximity=True,
properties=None):
"""
Overwrite the insert method of the Molecule class, in order to
reset the facets, symmetry operations and point group after the site
has been inserted.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
index (int): Index to insert site.
species (pymatgen.Specie): Species of inserted site.
coords ((3,) numpy.ndarray): Coordinates of inserted site.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dictionary of properties for the Site.
"""
super(Cage, self).insert(index, species, coords, validate_proximity,
properties)
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def append(self, species, coords, validate_proximity=True,
properties=None):
"""
Overwrite the append method of the Molecule class, in order to
reset the facets, symmetry operations and point group after the site
has been appended.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
species (pymatgen.Specie): Species of inserted site.
coords ((3,) numpy.ndarray): Coordinates of inserted site.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dictionary of properties for the Site.
"""
super(Cage, self).append(species, coords, validate_proximity,
properties)
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
    def find_surface_facets(self, ignore=()):
        """
        Find all the surface facets of the Cage object. A surface facet is
        defined as a facet for which all atoms of non-ignored species are on
        one side of the surface defined by the facet.

        The result is stored in self._facets (see the *facets* property);
        nothing is returned.

        Args:
            ignore (List/Tuple of str/Element): The elements to ignore for the
                surface facet determination. Can be either a tuple or list of
                Elements or strings describing those elements
        """
        # Check if the content of ignore contains strings
        if any([type(item) is str for item in ignore]):
            # If so, turn them into elements
            ignore = tuple([pmg.Element(item) for item in ignore])

        # Find all the sites which should not be ignored
        valid_sites = [site for site in self.sites if site.specie not in ignore]

        # Find all of the Facets from combinations of three valid Sites
        all_facets = [Facet(list(combination)) for combination
                      in combinations(valid_sites, 3)]

        # Flip the normal of the facets in case it points to the center of mass
        # of the Cage
        for facet in all_facets:
            if facet.angle_to_normal(self.center_of_mass) < math.pi / 2:
                facet.flip_normal()

        # Find all the facets that are "on the surface"
        facets_surf = []
        for facet in all_facets:
            # If all the angles are larger than pi/2, it's a surface site
            all_angles_smaller = True

            # Check the other sites in the molecule
            other_sites = valid_sites.copy()
            for site in facet.sites:
                other_sites.remove(site)
            for site in other_sites:
                angle = abs(facet.angle_to_normal(site.coords))
                # For angles sufficiently close to pi/2, add the site to the
                # facet (an angle of pi/2 to the normal means the site lies
                # in the facet's plane)
                if abs(angle - math.pi / 2) < ANGLE_TOLERANCE:
                    facet.add_site(site)
                elif angle - math.pi / 2 < -ANGLE_TOLERANCE:
                    # A site lies on the normal side of the plane, so this
                    # triangle cannot be a surface facet
                    all_angles_smaller = False

            # Now check if the facet isn't already part of the surface facets
            # (possible because add_site() can grow a facet beyond 3 sites,
            # making several triangles collapse into the same facet)
            facet_in_list = False
            for surf_facet in facets_surf:
                if len(set(facet.sites) & set(surf_facet.sites)) \
                        == len(facet.sites):
                    facet_in_list = True

            # In that case, add it to the surface sites
            if all_angles_smaller and not facet_in_list:
                facets_surf.append(facet)

        self._facets = facets_surf
def find_noneq_facets(self, tol=SYMMETRY_TOLERANCE):
"""
Find the nonequivalent facets of the Cage.
Args:
tol (float): Tolerance for the equivalence condition, i.e. how much
the distance between the centers is allowed to be after
a symmetry operation.
Returns:
*List of Facets*: A set of non-equivalent facets of the Cage.
"""
if not self.facets:
print("Please set up surface facets first")
return []
# Find all the non-equivalent facets
facets_noneq = []
for facet in self.facets:
facet_is_nonequivalent = True
# Check to see if the facet is equivalent to one in the
# nonequivalent list
for facet_noneq in facets_noneq:
for symm in self.symmops:
symm_facet_center = symm.operate(facet.center.tolist())
if np.linalg.norm(symm_facet_center - facet_noneq.center) \
< tol:
facet_is_nonequivalent = False
if facet_is_nonequivalent:
facets_noneq.append(facet)
return facets_noneq
    def set_up_facet_list(self, fmt='dict', tol=SYMMETRY_TOLERANCE):
        """
        Set up a List of the surface facets, and how they relate to the
        non-equivalent facets, i.e. which non-equivalent facet they can be
        related to and using which symmetry operation:

            noneq_facet = symmop(facet)

        Args:
            fmt(str): Format of the Facet List. Can be either:

                *dict* -- A dictionary with the facets as keys and the tuple
                    (noneq_facet, symmop) as the values.

                *str_array* -- A structured array of type:
                    [('surf_facet', Facet), ('noneq_facet', Facet),
                    ('symmop', SymmOp)]

            tol (float): Tolerance for the equivalence condition, i.e. how much
                the distance between the centers is allowed to be after
                a symmetry operation.

        Returns:
            (*List of cage.Facets*) -- The list of facets with their
                corresponding non-equivalent facet and symmetry operation.
                See the *fmt* argument.
        """
        if not self.facets:
            print("Please set up surface facets first.")
            return []

        # For every surface facet, find a non-equivalent facet and the
        # symmetry operation that maps the facet's center onto it.
        facet_list = []
        for facet in self.facets:
            for noneq_facet in self.find_noneq_facets():
                for symm in self.symmops:
                    symm_center = symm.operate(facet.center)
                    if np.linalg.norm(symm_center - noneq_facet.center) < tol:
                        facet_list.append((facet, noneq_facet, symm))
                        # first matching symmop is enough for this pair
                        break

        if fmt == 'str_array':
            list_types = [('surf_facet', Facet), ('noneq_facet', Facet),
                          ('symmop', SymmOp)]
            facet_array = np.array(facet_list, dtype=list_types)
            # Sanity check: each surface facet should appear exactly once
            if len(facet_array) == len(self.facets):
                return facet_array
            else:
                raise ValueError("Obtained array length is not equal to number"
                                 " of facets. Something must have gone wrong.")
        elif fmt == 'dict':
            facet_dict = {}
            for i in range(len(facet_list)):
                facet_dict[facet_list[i][0]] = (facet_list[i][1],
                                                facet_list[i][2])
            # Sanity check: each surface facet should appear exactly once
            if len(facet_dict.keys()) == len(self.facets):
                return facet_dict
            else:
                raise ValueError("Obtained number of facets in dict is not "
                                 "equal to number of surface facets. "
                                 "Something must have gone wrong.")
def visualize_facets(self, filename, ignore=(), max_bond_length=1.9):
    """
    Visualize the facets of the Cage by setting up a VESTA file with
    the various facets in different colours and with the corresponding
    index given to the atom, labeled 'Fa', placed at the center of the
    facet.

    Args:
        filename (str): Name of the VESTA file to be written.
        ignore (tuple): Elements for which no 'Fa'-element bonds should
            be written to the SBOND section.
        max_bond_length (float): Maximum bond length for the
            element-element bonds in the SBOND section.
    """
    # TODO This method is very specific. We probably should find a more general way to do this.
    # TODO Fix bug for facets with more than three sites
    # Fix: use a context manager so the file handle is always closed
    # (the previous implementation leaked the handle on any error), and
    # avoid shadowing the name 'file'.
    with open(filename, "w") as out:
        out.write("#VESTA_FORMAT_VERSION 3.3.0\n\n")
        out.write("MOLECULE\n")
        # Write structure data
        out.write("\n")
        out.write("STRUC")
        out.write("\n")
        site_number = 1
        specie_number = 1
        for site in self.sites:
            out.write(str(site_number) + "\t" + str(site.specie) + "\t" +
                      str(site.specie) + str(specie_number) + "\t1.000")
            for coord in site.coords:
                out.write("\t" + str(coord))
            out.write("\t1\t-\n\t\t\t\t0.0000\t0.0000\t0.0000\t0.00\n")
            try:
                # Continue the per-element counter while the next site has
                # the same element, otherwise restart it at 1.
                # NOTE(review): list.index returns the first occurrence, so
                # duplicate sites would confuse this numbering -- confirm.
                if self.sites[self.sites.index(site) + 1].specie \
                        == site.specie:
                    specie_number += 1
                else:
                    specie_number = 1
            except IndexError:
                # Last site in the list: nothing to compare with.
                pass
            site_number += 1
        # Add the centers of the facets as 'Fa' dummy atoms.
        for index in range(len(self.facets)):
            out.write(str(site_number) + "\tFa" + "\tFa" + str(index) + "\t"
                      + "1.000")
            for coord in self.facets[index].center:
                out.write("\t" + str(coord))
            out.write("\t1\t-\n\t\t\t\t0.0000\t0.0000\t0.0000\t0.00\n")
            site_number += 1
        out.write(" 0")
        # Write the bond data
        out.write("\n\n")
        out.write("SBOND\n")
        # All heteronuclear pairs, plus the homonuclear pairs.
        bonds = list(combinations([element for element in self.composition],
                                  2))
        for element in self.composition:
            bonds.append((element, element))
        bond_number = 1
        for bond in bonds:
            out.write(str(bond_number) + "\t" + str(bond[0]) + "\t"
                      + str(bond[1]) + "\t0.00000\t" + str(max_bond_length) +
                      "\t0\t1\t0\t0\t1\t0.250\t2.000\t127\t127\t127\n")
            bond_number += 1
        # Bonds between the facet-center dummy atoms and the elements.
        for element in self.composition:
            if element not in ignore:
                out.write(str(bond_number) + "\tFa" + "\t" + str(element) +
                          "\t0.00000\t1.85\t0\t1\t1\t0\t1\t0.250\t2.000"
                          "\t210\t50\t20\n")
                bond_number += 1
        out.write(" 0")
        # Write the atom data
        out.write("\n\n")
        out.write("ATOMT\n")
        out.write("\n")
        atom_number = 1
        for element in self.composition:
            out.write(str(atom_number) + "\t" + str(element) + "\t"
                      + str(float(element.atomic_radius)) + "\t")
            # Add the RGB colour of the atom
            if element == pmg.Element('B'):
                out.write("31\t162\t15\t31\t162\t15\t204\n")
            elif element == pmg.Element('C'):
                out.write("128\t73\t41\t128\t73\t41\t204\n")
            elif element == pmg.Element('H'):
                out.write("255\t204\t204\t255\t204\t204\t204\n")
            elif element == pmg.Element('Br'):
                out.write("255\t24\t204\t255\t204\t204\t204\n")
            elif element == pmg.Element('Cl'):
                out.write("255\t204\t24\t255\t204\t204\t204\n")
            elif element == pmg.Element('F'):
                out.write("255\t204\t204\t255\t204\t204\t204\n")
            elif element == pmg.Element('I'):
                out.write("25\t204\t204\t255\t204\t204\t204\n")
            else:
                out.write("255\t204\t204\t255\t204\t204\t204\n")
            atom_number += 1
        # Dummy atom type for the facet centers.
        out.write(str(atom_number) + "\tFa\t0.4\t140\t40\t40"
                  "\t215\t50\t20\t220\n")
        atom_number += 1
        out.write(" 0")
        out.write("\n\n")
        out.write("STYLE\n")
        out.write("MODEL 2 1 0\n")
        out.write("POLYS 2\n")
def find_noneq_facet_chain(self, start=0, facets=tuple,
                           symm_tol=SYMMETRY_TOLERANCE,
                           verbose=False):
    """
    Construct a chain of facets, i.e. a collection of facets that
    are connected by edges. Automatically sorts the facets so they
    are connected by their neighbours in the list.

    Args:
        start (int): Determines from which termination facet the chain is
            constructed. This might be useful if the chain is not
            constructed as the user would like.
        facets (tuple): Tuple of Facets which are to be used for the
            chain. In case no facets are provided, the list of
            non-equivalent facets will be used.
        symm_tol (float): Tolerance for the equivalence condition, i.e.
            how much the distance between the centers is allowed to be
            after a symmetry operation.
        verbose (bool): Print information about the analysis procedure.
            This is mainly useful when the result is not as expected.

    Returns:
        (*List of Facets*) -- A chain of facets connected by sharing an
        edge.
    """
    # TODO This code needs cleanup and serious testing
    if verbose:
        print("")
        print("Starting search for chain of non-equivalent facets in "
              "molecule " + self.composition.__str__().replace(' ', '')
              + "...")
        print("")
        print("Looking for non-equivalent facets...")
    # Maps every surface facet to its (non-equivalent facet, symmop) pair.
    facet_dict = self.set_up_facet_list('dict', tol=symm_tol)
    # If no facets are provided, set up the connected non-equivalent facets
    # NOTE(review): the default sentinel is the *type* ``tuple`` itself,
    # not an empty tuple, so only the bare default triggers this branch.
    if facets == tuple:
        noneq_facets = self.find_noneq_facets(tol=symm_tol)
        chain_length = len(noneq_facets)
        if verbose:
            print("Found " + str(len(noneq_facets)) +
                  " non-equivalent facets.")
            print("")
        # Find the facets in the chain, i.e. connect the non-equivalent
        # facets
        chain_facets = [noneq_facets[0]]
        chain_list_noneq_facets = [noneq_facets[0]]
        # Grow the chain until every non-equivalent facet is represented.
        # NOTE(review): if no suitable edge-sharing facet exists this loop
        # never terminates -- confirm against realistic inputs.
        while len(chain_facets) < len(noneq_facets):
            new_chain_facet = False
            # Loop over the facets in the chain
            for chain_facet in chain_facets:
                # If a new facet has been appended, restart the loop
                if not new_chain_facet:
                    # Find a facet that shares an edge
                    for facet in self.facets:
                        # Check if the facet shares an edge and is not
                        # related to one of the non-equivalent facets in
                        # the chain
                        if len(set(facet.sites) & set(chain_facet.sites)) \
                                == 2 and \
                                (facet_dict[facet][0]
                                 not in chain_list_noneq_facets):
                            chain_facets.append(facet)
                            chain_list_noneq_facets.append(
                                facet_dict[facet][0]
                            )
                            new_chain_facet = True
                            break
    else:
        # User-provided facets: take them as the chain members directly.
        chain_facets = list(facets)
        chain_length = len(facets)
    if verbose:
        print("Found " + str(len(chain_facets)) +
              " facets in chain.")
        print("")
    # A chain needs at least two facets.
    if chain_length == 0 or chain_length == 1:
        raise ValueError("Number of facets is too small to form a chain.")
    # Find the termination facets. These are defined as facets which only
    # have one edge with other chain facets.
    end_facets = []
    for facet in chain_facets:
        other_facets = chain_facets.copy()
        other_facets.remove(facet)
        for other_facet in other_facets:
            if len(set(facet.sites) & set(other_facet.sites)) == 2:
                # NOTE(review): facets are only ever appended in the
                # for-else below, so this membership test appears to be
                # dead code -- confirm.
                if facet in end_facets:
                    end_facets.remove(facet)
                break
        else:
            # No other chain facet shares an edge: treat as termination.
            end_facets.append(facet)
    if verbose:
        print("Found " + str(len(end_facets)) + " end facets in chain.")
        print("")
    if len(end_facets) == 0:
        # Fall back to starting from an arbitrary chain facet.
        end_facets = chain_facets
        print('Could not find a termination facet. Starting chain buildup '
              'from the first facet in the list.')
    # Sort the chain:
    try:
        facet_chain = [end_facets[start]]
    except IndexError:
        print("The requested starting index is too large. Taking the final"
              " termination facet in the list.")
        facet_chain = [end_facets[-1]]
    other_facets = chain_facets.copy()
    other_facets.remove(facet_chain[0])
    for i in range(len(other_facets)):
        # Candidate successors: facets that connect to the chain's tail,
        # annotated with how many further links they still have.
        options = []
        for facet in other_facets:
            # See if the facet connects to the last facet in the chain
            if len(set(facet.sites) & set(facet_chain[-1].sites)) == 2:
                # Check the amount of links this next facet has
                leftover_facets = other_facets.copy()
                leftover_facets.remove(facet)
                number_links = 0
                for leftover_facet in leftover_facets:
                    if len(set(facet.sites) & set(leftover_facet.sites)) \
                            == 2:
                        number_links += 1
                options.append((facet, number_links))
        if len(options) == 1:
            # Unambiguous: extend the chain with the single candidate.
            facet_chain.append(options[0][0])
            other_facets.remove(options[0][0])
        else:
            # Prefer a candidate that itself has exactly one further link.
            for option in options:
                if option[1] == 1:
                    facet_chain.append(option[0])
                    other_facets.remove(option[0])
                    break
    if len(facet_chain) < chain_length:
        print('WARNING: Could not connect all facets.')
    return facet_chain
def find_facet_links(self, share_edge=False):
    """
    Find the non-equivalent links between facets of the cage molecule.

    Two facets are linked when they share at least one site; if
    *share_edge* is set, they must share two sites (an edge). Of all
    symmetry-equivalent links only one representative is returned.

    Args:
        share_edge (bool): Only return links between facets that share an
            edge.

    Returns:
        (List of (cage.Facet, cage.Facet) Tuples) - The
        non-equivalent facet links of the Cage.
    """
    # All candidate pairs of surface facets.
    pairs = combinations(self.facets, 2)
    if share_edge:
        # A shared edge corresponds to exactly two shared sites.
        connected = [pair for pair in pairs
                     if len(set(pair[0].sites) & set(pair[1].sites)) == 2]
    else:
        # Any nonzero site overlap counts as a connection.
        connected = [pair for pair in pairs
                     if set(pair[0].sites) & set(pair[1].sites)]
    # Two links are equivalent when a symmetry operation maps the midpoint
    # of one onto the midpoint of the other; keep one representative each.
    unique_links = []
    for pair in connected:
        midpoint = (pair[0].center + pair[1].center) / 2
        already_known = any(
            np.linalg.norm(symm.operate(midpoint)
                           - (known[0].center + known[1].center) / 2) < 1e-2
            for known in unique_links
            for symm in self.symmops
        )
        if not already_known:
            unique_links.append(pair)
    return unique_links
def find_noneq_chain_links(self, facets=tuple, symm_tol=SYMMETRY_TOLERANCE,
                           verbose=False):
    """
    Find the links between the facets of the chain that connects a
    set of non-equivalent facets.

    Args:
        facets (tuple): Tuple of Facets which are to be used for the
            chain. In case no facets are provided, the full list of
            non-equivalent facets will be used.
        symm_tol (float): Tolerance for the equivalence condition, i.e.
            how much the distance between the centers is allowed to be
            after a symmetry operation.
        verbose (bool): Print information about the analysis procedure.
            This is mainly useful when the result is not as expected.

    Returns:
        (*List of (cage.Facet, cage.Facet) Tuples*) --
        The links between the Facets in the chain of non-equivalent
        Facets.
    """
    chain = self.find_noneq_facet_chain(facets=facets, symm_tol=symm_tol,
                                        verbose=verbose)
    # Pair every facet in the chain with its successor.
    return list(zip(chain, chain[1:]))
def find_farthest_facet(self, point):
    """
    Find the Facet of the Molecule that is the farthest away from the point
    provided.

    Args:
        point ((3,) numpy.ndarray): Point provided by user.

    Returns:
        (*cage.Facet*) -- The farthest facet, or None when no facets are
        set up.
    """
    # max() keeps the first of equally distant facets, just like the
    # strict '>' comparison of a manual loop; an empty list yields None.
    return max(self.facets,
               key=lambda facet: np.linalg.norm(point - facet.center),
               default=None)
def find_closest_facet(self, point):
    """
    Find the Facet of the Molecule that is the closest to the point
    provided.

    Args:
        point ((3,) numpy.ndarray): Point provided by user.

    Returns:
        (*cage.Facet*) -- The closest facet, or None when no facets are
        set up.
    """
    # Fix: the previous implementation initialized the best distance to
    # the arbitrary sentinel 1e6 and therefore silently returned None
    # when every facet was farther than 1e6 from the point. min() has no
    # such cutoff; with default=None the empty-facet behavior is kept.
    return min(self.facets,
               key=lambda facet: np.linalg.norm(point - facet.center),
               default=None)
class OccupiedCage(Cage):
    """
    A Cage Molecule that has one or more cations docked on it.
    """
    # Cation elements recognized as docked species by find_surface_facets().
    CATIONS = (pmg.Element('Li'), pmg.Element('Na'), pmg.Element('K'),
               pmg.Element('Mg'))

    def __init__(self, species, coords, charge=0, spin_multiplicity=None,
                 validate_proximity=False, site_properties=None):
        """
        Initialize an OccupiedCage instance. The geometric center of the
        anion is automatically centered on the origin.

        Args:
            species (List of pymatgen.Specie): List of atomic species.
                Possible kinds of input include a list of dict of
                elements/species and occupancies, a List of
                elements/specie specified as actual Element/Specie,
                Strings ("Fe", "Fe2+") or atomic numbers (1,56).
            coords (List of (3,) numpy.ndarray): List of cartesian
                coordinates of each species.
            charge (float): Charge for the molecule. Defaults to 0.
            spin_multiplicity (int): Spin multiplicity of the molecule.
                Defaults to None, in which case it is derived from the
                charge.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 1 Ang apart. Defaults to False.
            site_properties (dict): Properties associated with the sites as
                a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
                sequences have to be the same length as the atomic species
                and fractional_coords. Defaults to None for no properties.

        Returns:
            (*cage.OccupiedCage*)
        """
        super(OccupiedCage, self).__init__(species, coords, charge,
                                           spin_multiplicity,
                                           validate_proximity,
                                           site_properties)
        self.center()
        self._docks = []

    @property
    def docks(self):
        """The facets on which a cation has been docked."""
        return self._docks

    @property
    def facets(self):
        """The surface facets of the molecule, or None if not set up."""
        return self._facets

    def center(self, point=None):
        """
        Center the OccupiedCage around a point by updating the sites, i.e.
        find the coordinates for the sites so that the geometric center
        **of the anion** is moved to the point provided. In case no point
        is provided, the anion is centered around the origin.

        Note: Running this method will reset the facets and symmetry
        information to None.

        Args:
            point ((3,) numpy.ndarray): Point around which to center the
                molecule.
        """
        anion_center = self.anion_center
        if point is not None:
            # Out-of-place subtraction so the array returned by the
            # anion_center property is never mutated in place.
            anion_center = anion_center - point
        # Find the new coordinates
        new_coords = np.array(self.cart_coords) - anion_center
        # Update the sites, carrying over any per-site properties.
        sites = []
        for i in range(len(self.species)):
            prop = None
            if self.site_properties:
                prop = {k: v[i] for k, v in self.site_properties.items()}
            sites.append(pmg.Site(self.species[i], new_coords[i],
                                  properties=prop))
        self._sites = sites
        # All cached geometry and symmetry information is now stale.
        self._facets = None
        self._symmops = None
        self._pointgroup = None
        self._facet_dict = None

    def add_dock(self, dock, cation=None, docking_point=None):
        """
        Add a docking site to the OccupiedCage. If the chemical symbol of
        the cation is provided, the cation is appended to the OccupiedCage.
        In case the cation is equal to *None*, the cation is assumed to be
        present and the facet is simply designated as a dock.

        Note: If a cation is appended to the molecule, running this method
        will reset the facets and symmetry information to None.

        Args:
            dock (cage.Facet): The Facet on which the cation is docked.
            cation (str): The chemical symbol of the cation element.
            docking_point ((3,) numpy.ndarray): Docking coordinates of the
                cation. Defaults to a point above the facet center, along
                the facet normal.
        """
        # Check if the dock is one of the facets of the OccupiedCage
        if dock not in self.facets:
            raise ValueError("Docking facet not found in the facet list of the"
                             " OccupiedCage.")
        if cation:
            # Fix: compare against None explicitly -- truth-testing a
            # (3,) numpy array raises an "ambiguous truth value" error.
            if docking_point is None:
                docking_point = dock.center + 2 * dock.normal
            self.append(pmg.Element(cation), docking_point)
            # Docking one cation pairs up one electron.
            self.set_charge_and_spin(self.charge,
                                     self.spin_multiplicity - 1)
        self._docks.append(dock)
        # TODO Add some more checks

    @classmethod
    def from_cage_and_facets(cls, cage, facets, docking_points=(),
                             cation='Li'):
        """
        Initialize an OccupiedCage from a Cage object and a tuple of
        facets.

        Args:
            cage (cage.Cage): The anion on which the cations are docked.
            facets (tuple): Tuple of cage.Facets on which the cations are
                docked.
            docking_points (tuple): Tuple of (3,) numpy.ndarray coordinates
                that define the docking coordinates of the corresponding
                docking Facets.
            cation (str): Chemical symbol of the cation. In case *None* is
                given, the docking sites are considered to already have a
                cation present.

        Returns:
            (*cage.OccupiedCage*)
        """
        occ_cage = cls(species=cage.species, coords=cage.cart_coords,
                       charge=cage.charge,
                       spin_multiplicity=cage.spin_multiplicity,
                       validate_proximity=True,
                       site_properties=cage.site_properties)
        # Add the docked cations to the Cage; facets without a matching
        # docking point fall back to the default docking coordinates.
        for index in range(len(facets)):
            try:
                occ_cage.add_dock(facets[index],
                                  docking_point=docking_points[index],
                                  cation=cation)
            except IndexError:
                occ_cage.add_dock(facets[index], cation=cation)
        return occ_cage

    @classmethod
    def from_poscar(cls, filename):
        """
        Initialize an OccupiedCage from a VASP POSCAR file.

        Args:
            filename (str): Path of the POSCAR file.

        Returns:
            (*cage.OccupiedCage*)
        """
        pass  # TODO

    def remove_surface_facet(self, facet):
        """
        Remove a surface facet from the list of facets of an OccupiedCage.

        Args:
            facet (cage.Facet): The facet which is to be removed from the
                molecule.
        """
        surface_facets = self.facets
        if surface_facets:
            # Fix: list.remove() returns None; the previous code assigned
            # that return value to self._facets, wiping out the entire
            # facet list instead of removing one facet.
            surface_facets.remove(facet)
            self._facets = surface_facets
        else:
            print('Surface Facets have not been set up yet.')

    def find_surface_facets(self, ignore=None):
        """
        Find the surface facets of the OccupiedCage, minus the facets which
        have a docked cation.

        Args:
            ignore (Tuple of Elements/Species): The elements to ignore for
                the surface facet determination.
        """
        # Work on a copy centered on the anion so the facet search is not
        # biased by the docked cations.
        mol = self.copy()
        anion = [site.coords for site in mol.sites
                 if site.specie not in OccupiedCage.CATIONS]
        mol.center(sum(anion) / len(anion))
        super(OccupiedCage, mol).find_surface_facets(ignore=ignore)
        surface_facets = mol.facets
        # Drop the facets that already carry a docked cation.
        # NOTE(review): relies on Facet equality between the re-centered
        # copy and the stored docks -- confirm the centers match.
        for dock in self.docks:
            surface_facets.remove(dock)
        self._facets = surface_facets
class Facet(SiteCollection, MSONable):
    """
    Facet of a Molecule object, defined by a list of Sites.
    """

    def __init__(self, sites, normal=None):
        """
        Initialize a Facet from a list of sites.

        Args:
            sites (list): List of pymatgen.Sites that define the facet.
            normal (numpy.ndarray): (3,) Numpy array that defines the
                normal vector of the facet. If not provided, it is derived
                from the site coordinates.
        """
        self._sites = sites
        self._center = utils.site_center(tuple(self.sites))
        if normal is not None:
            self._normal = normal  # TODO Check if input normal makes sense
        else:
            self._normal = self._find_normal()

    def _find_normal(self):
        """
        Finds the normal vector of the facet.

        By convention, the normal of the facet is a vector perpendicular to
        the surface with length equal to one and pointing away from the
        origin.

        Returns:
            (*numpy.ndarray*) -- Normal of the facet.
        """
        if len(self.sites) == 3:
            normal = np.cross(self._sites[0].coords - self._sites[1].coords,
                              self._sites[0].coords - self._sites[2].coords)
        else:
            # TODO Make an average of possible normals
            normal = np.cross(self._sites[0].coords - self._sites[1].coords,
                              self._sites[0].coords - self._sites[2].coords)
        # Make length of the normal equal to 1
        normal = normal / np.linalg.norm(normal)
        # Flip normal in case it is pointing towards the origin.
        if utils.angle_between(-self.center, normal) < math.pi / 2:
            normal = - normal
        return normal

    def __str__(self):
        """
        Returns:
            (*str*) -- String representation of the facet.
        """
        output = ['Facet with sites:']
        for site in self.sites:
            output.append(site.__str__())
        return '\n'.join(output)

    def __eq__(self, other):
        """
        Check if the Facet is equal to another. Two facets are only equal
        if they have the same sites and normal.

        Args:
            other (cage.Facet): Facet for comparison.

        Returns:
            (*bool*) - Whether or not the facets are equal.
        """
        # TODO Check method for facets with more than 3 vertices
        if (len(set(self.sites) & set(other.sites)) == len(self.sites)) and \
                np.allclose(self.normal, other.normal, atol=1e-3):
            return True
        else:
            return False

    def __hash__(self):
        """
        Make Facet hashable. This is valuable in order to e.g. make it a
        key in dictionaries.

        Returns:
            (*int*) -- Hash of the Facet.
        """
        return hash(str(self))

    @property
    def sites(self):
        """
        The sites that define the Facet.

        :return: List of Sites
        """
        return self._sites

    @property
    def center(self):
        """
        The center of the Facet.

        :return: Array of the center coordinates
        """
        return self._center

    @property
    def normal(self):
        """
        Surface normal of the Facet

        :return: Array of the normal vector
        """
        return self._normal

    @classmethod
    def from_str(cls, input_string, fmt="json"):
        """
        Initialize a Facet from a string.

        Currently only supports 'json' formats.

        Args:
            input_string (str): String from which the Facet is initialized.
            fmt (str): Format of the string representation.

        Returns:
            (*cage.Facet*)
        """
        if fmt == "json":
            d = json.loads(input_string)
            return cls.from_dict(d)
        else:
            raise NotImplementedError('Only json formats have been '
                                      'implemented.')

    @classmethod
    def from_file(cls, filename):
        """
        Initialize a Facet from a file.

        Currently only supports 'json' formats.

        Args:
            filename (str): File in which the Facet is stored.

        Returns:
            (*cage.Facet*)
        """
        with zopen(filename) as file:
            contents = file.read()
        return cls.from_str(contents)

    @classmethod
    def from_dict(cls, d):
        """
        Initialize a Facet from a dictionary.

        Args:
            d (dict): Dictionary of the Facet properties, i.e. 'sites' and
                'normal'.

        Returns:
            (*cage.Facet*)
        """
        sites = []
        for site in d['sites']:
            sites.append(pmg.Site.from_dict(site))
        return cls(sites, normal=np.array(d['normal']))

    def as_dict(self):
        """
        Return a dictionary representation of the Facet.

        Returns:
            (*dict*)
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "sites": []}
        for site in self:
            site_dict = site.as_dict()
            del site_dict["@module"]
            del site_dict["@class"]
            d["sites"].append(site_dict)
        d['normal'] = self.normal.tolist()
        return d

    def to(self, fmt="json", filename=None):
        """
        Write the Facet to a file.

        Currently supports only the 'json' format.

        Args:
            fmt (str): Format of the file.
            filename (str): Name of the file to which the facet should be
                written.
        """
        if fmt == "json":
            if filename:
                with zopen(filename, "wt", encoding='utf8') as file:
                    return json.dump(self.as_dict(), file)
            else:
                return json.dumps(self.as_dict())
        else:
            raise NotImplementedError("Currently only json format is "
                                      "supported.")

    def copy(self):
        """
        Make a copy of the Facet.

        Returns:
            (*cage.Facet*)
        """
        return Facet(self._sites, self._normal)

    def add_site(self, site):
        """
        Add a site to the facet. Will only work in case the site is in the
        surface defined by the facet.

        Args:
            site (pymatgen.core.sites.Site): The site that will be added to
                the facet.
        """
        # Check if the site is in the surface defined by the facet
        angle = self.angle_to_normal(site.coords)
        if abs(angle - math.pi / 2) > ANGLE_TOLERANCE:
            raise ValueError("Angle to facet normal deviates too much from "
                             "pi/2. A site cannot be added to the facet if "
                             "it is not in the same plane.")
        # Add the site to the list of sites of the facet
        self._sites.append(site)
        self._center = utils.site_center(tuple(self.sites))
        # TODO Find the new normal, but make sure it doesn't flip direction

    def get_normal_intersection(self, other):
        """
        Find the intersection of the normal lines of the Facet and another
        one. Currently only works on an edge sharing Facet whose normal
        intersects with the normal of the facet. In case the normals do not
        intersect, an approximation is given.

        Args:
            other (cage.Facet): Facet which shares an edge with this one.

        Returns:
            (*numpy.ndarray*) -- (3,) Numpy array of the intersection
            coordinates.
        """
        # TODO This method needs improvement, i.e. to be made more general
        edge = set(self.sites) & set(other.sites)
        if len(edge) != 2:
            raise ValueError('Provided facet does not share an edge.')
        edge_middle = sum([site.coords for site in edge]) / 2
        # Solve the planar triangle formed by the two centers and the
        # intersection point of the (in-plane) normal lines.
        y1 = utils.distance(self.center, edge_middle)
        y2 = utils.distance(other.center, edge_middle)
        beta = utils.angle_between(self.normal, other.normal)
        psi = math.atan2(math.sin(beta), (y1 / y2 + math.cos(beta)))
        theta = beta - psi
        r = y1 / math.sin(theta)
        r1 = r * math.cos(theta)
        r2 = r * math.cos(psi)
        intersection1 = self.center - r1 * utils.unit_vector(self.normal)
        intersection2 = other.center - r2 * utils.unit_vector(other.normal)
        if utils.distance(intersection1, intersection2) < 1e-4:
            return intersection1
        else:
            print('Could not find perfect intersection. Returned average '
                  'between two best results on the respective normal lines.')
            return (intersection1 + intersection2) / 2

    def redefine_origin(self, origin):
        """
        Adjust the coordinates of the Facet, in order to redefine the
        origin.

        Args:
            origin (numpy.ndarray): (3,) Numpy array of the new origin's
                coordinates.
        """
        # Find the new coordinates
        new_coords = np.array(self.cart_coords) - origin
        # Update the sites, carrying over any per-site properties.
        sites = []
        for i in range(len(self.species)):
            prop = None
            if self.site_properties:
                prop = {k: v[i] for k, v in self.site_properties.items()}
            sites.append(pmg.Site(self.species[i], new_coords[i],
                                  properties=prop))
        self._sites = sites

    def get_distance(self, i, j):
        """
        Calculate the distance between two sites.

        Args:
            i (int): Index of the first site.
            j (int): Index of the second site.

        Returns:
            (*float*) -- Distance between the two sites.
        """
        pass  # TODO

    def is_equivalent(self, other, symmops, tol=SYMMETRY_TOLERANCE):
        """
        Check if a Facet is equivalent to another Facet based on a list of
        symmetry operations.

        Args:
            other (cage.Facet): Facet with which to compare the Facet.
            symmops (list): List of pymatgen.Symmop instances which are
                used to determine equivalency.
            tol (float): Tolerance for the equivalence condition, i.e. how
                much the distance between the centers is allowed to be
                after a symmetry operation.

        Returns:
            (*bool*) - Whether or not the facet is equivalent to the other
            facet.
        """
        is_equivalent = False
        for symm in symmops:
            symm_center = symm.operate(self.center)
            # Fix: the second positional argument of np.linalg.norm is the
            # norm *order*, not a second vector; the distance between the
            # two centers is the norm of their difference.
            if np.linalg.norm(symm_center - other.center) < tol:
                is_equivalent = True
        return is_equivalent

    def flip_normal(self):
        """
        Flip the direction of the surface normal of the Facet.
        """
        self._normal = -self._normal

    def angle_to_normal(self, point):
        """
        Find the angle between the vector that connects the center and the
        point and the normal.

        Args:
            point (numpy.ndarray): Coordinates of the point.

        Returns:
            (*float*): Angle to the normal of the facet.
        """
        return utils.angle_between(point - self._center, self._normal)
|
mbercx/cage
|
cage/core.py
|
Python
|
mit
| 51,327
|
[
"VASP",
"pymatgen"
] |
9cb4b439961acff3eaf65ee4e4d72525df613f25f89940cdf8fac0e8faf9ca22
|
#!/usr/bin/env python
import unittest
import postpic as pp
from postpic import io
#import postpic.io.common
import numpy as np
import os
class TestIO(unittest.TestCase):
    """Round-trip and export tests for postpic's field I/O."""

    def gettempfile(self, suffix=''):
        # Create a named temporary file, close the OS-level handle right
        # away and remember the path so tearDown can clean it up.
        import tempfile
        handle, path = tempfile.mkstemp(suffix=suffix)
        os.close(handle)
        print('filename is {}'.format(path))
        self._tempfiles.append(path)
        return path

    def setUp(self):
        self._tempfiles = []
        pp.chooseCode('DUMMY')
        self.dump = pp.datareader.readDump(100)
        self.testfield = self.dump.Ey()

    def tearDown(self):
        # Remove every temporary file created during the test.
        for path in self._tempfiles:
            os.remove(path)

    def test_importexport_npz(self):
        filename = self.gettempfile(suffix='.npz')
        self.testfield.export(filename)
        reloaded = pp.load_field(filename)
        # The field data must survive the round trip.
        self.assertTrue(np.allclose(np.asarray(self.testfield),
                                    np.asarray(reloaded)))
        # The metadata must survive as well.
        self.assertEqual(self.testfield.name, reloaded.name)
        self.assertEqual(self.testfield.unit, reloaded.unit)
        self.assertTrue(np.all(self.testfield.axes_transform_state
                               == reloaded.axes_transform_state))
        self.assertTrue(np.all(self.testfield.transformed_axes_origins
                               == reloaded.transformed_axes_origins))
        # The axis grids must match one by one.
        for idx in range(len(self.testfield.axes)):
            self.assertEqual(len(self.testfield.axes[idx]),
                             len(reloaded.axes[idx]))
            self.assertTrue(np.allclose(self.testfield.axes[idx].grid_node,
                                        reloaded.axes[idx].grid_node))

    def test_export_csv(self):
        # Smoke test: exporting to csv must not raise.
        filename = self.gettempfile(suffix='.csv')
        self.testfield.export(filename)

    def test_export_vtk(self):
        # Smoke test: exporting to vtk must not raise.
        filename = self.gettempfile(suffix='.vtk')
        self.testfield.export(filename)

    def test_arraydata(self):
        x = np.arange(0, 24).reshape(2, 3, 4)
        y = np.arange(1, 25).reshape(2, 3, 4)
        z = np.arange(2, 26).reshape(2, 3, 4)
        interleaved = io.vtk.ArrayData(x, y, z).transform_data(np.dtype('I'))
        expected = [0, 1, 2, 12, 13, 14, 4, 5, 6, 16, 17, 18, 8, 9, 10, 20, 21, 22,
                    1, 2, 3, 13, 14, 15, 5, 6, 7, 17, 18, 19, 9, 10, 11, 21, 22, 23,
                    2, 3, 4, 14, 15, 16, 6, 7, 8, 18, 19, 20, 10, 11, 12, 22, 23, 24,
                    3, 4, 5, 15, 16, 17, 7, 8, 9, 19, 20, 21, 11, 12, 13, 23, 24, 25]
        self.assertTrue(np.all(interleaved == expected))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
stetie/postpic
|
test/test_io.py
|
Python
|
gpl-3.0
| 2,644
|
[
"VTK"
] |
2ed039d04011c6e1b90d09ffc88b66005c72eaa0069e06f31b7344b6519bb931
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Running SageNB Tests
Functions for running SageNB tests. This can also be used as a script.
NOTE:
The SageNB tests assume a Selenium server or Grid hub is running
with the options given in :mod:`sagenb.testing.notebook_test_case` or
set by :func:`setup_tests`.
Selenium server can be downloaded from the Selenium `download page
<http://seleniumhq.org/download/>`_ as part of the Selenium RC package
and can be run with `java -jar selenium-server.jar`. To set up
Selenium Grid, please visit its `home page
<http://selenium-grid.seleniumhq.org/>`_ for instructions.
TODO:
- Add extra functionality to this script
- Include a proxy to this script in the Sage scripts repo.
"""
# Developer note:
# The Selenium server cannot be included in the package because
# of the possibility of incompatible libraries and binaries with
# those of the user's browser (e.g., Python, etc.)
from __future__ import print_function
import unittest
import notebook_test_case
from ..config import SAGE_BROWSER
from tests import test_accounts, test_worksheet, test_worksheet_list
# Maps test-case class names to the modules that define them; used by
# run_any() to resolve names like 'TestAccounts.test_foo'.
CASES = {
    'TestAccounts': test_accounts,
    'TestWorksheet': test_worksheet,
    'TestWorksheetList': test_worksheet_list
}

# Combined suite containing every SageNB Selenium test.
all_tests = unittest.TestSuite((test_accounts.suite,
                                test_worksheet.suite,
                                test_worksheet_list.suite))
def setup_tests(address='localhost', secure=False,
                environment='*firefox3 /usr/bin/firefox'):
    """
    Set the selected options for the SageNB Selenium tests.

    INPUT:

    - ``address`` - a string (default: 'localhost'); address of the
      network interface at which the notebook server listens. Do not
      leave this empty; see :mod:`sagenb.testing.notebook_test_case`
      for details.

    - ``secure`` - a boolean (default: False); whether to launch a
      secure notebook server. Note: Browser security warnings will
      yield failed tests. To work around these in Firefox, close all
      windows, create a new profile (e.g., `firefox -P selenium`),
      browse to a secure notebook server, accept the certificate, and
      quit. Then launch the Selenium server with, e.g.,

        java -jar selenium-server -firefoxProfileTemplate $HOME/selenium/firefox

      and run the tests. A minimal profile template directory can
      contain just the files `cert8.db` and `cert_override.txt`.

    - ``environment`` - a string (default: '*firefox3
      /usr/bin/firefox'); the browser environment in which to run the
      tests. The path is optional. However, for the Selenium server
      to have complete control over the launched browser, it's best to
      give the full path to the browser *executable* (i.e., not a
      shell script).

      Possible environments include '*chrome', '*firefox',
      '*firefox3', '*googlechrome', '*iexplore', '*opera', '*safari'.

    EXAMPLES::

        sage: import sagenb.testing.run_tests as rt               # not tested
        sage: env = '*firefox3 /usr/lib64/firefox-3.5.6/firefox'  # not tested
        sage: rt.setup_tests('localhost', True, env)              # not tested
        sage: rt.run_any()                                        # not tested
        sage: rt.setup_tests('localhost', True, '*opera')         # not tested
        sage: rt.run_and_report()                                 # not tested
    """
    # TODO: Add a directory option for parallel testing.
    nb_options = notebook_test_case.NB_OPTIONS
    nb_options['address'] = address
    nb_options['secure'] = secure
    notebook_test_case.SEL_OPTIONS['environment'] = environment
def run_any(tests=all_tests, make_report=False, **kwargs):
    """
    Build and run an ad hoc test suite from a test name, case, suite,
    or a mixed list thereof. If no matching tests are found, no tests
    are run.

    INPUT:

    - ``tests`` - a string, :class:`unittest.TestCase`,
      :class:`unittest.TestSuite`, or a mixed list thereof. Strings
      can be test names, with or without the prefix 'test_'.

    - ``make_report`` - a boolean (default: False); whether to
      generate a HTML report of the test results.

    - ``kwargs`` - a dictionary; additional keyword options to pass to
      :func:`run_suite` or :func:`run_and_report`.

    EXAMPLES::

        sage: import sagenb.testing.run_tests as rt               # not tested
        sage: rt.run_any('simple_evaluation', make_report=True)   # not tested
        sage: rt.run_any(['4088', 'test_3711'], verbosity=1)      # not tested
        sage: rt.run_any('foo', False)                            # not tested
        sage: rt.run_any(rt.test_accounts.TestAccounts)           # not tested
        sage: rt.run_any(make_report=True)                        # not tested
    """
    import inspect

    loader = unittest.TestLoader()
    # Normalize to a list so single items and lists are handled alike.
    items = tests if isinstance(tests, list) else [tests]
    collected = []
    for item in items:
        if isinstance(item, str):
            # Accept test names with or without the 'test_' prefix.
            name = item if item.startswith('test_') else 'test_' + item
            # Try the name against every known test-case module.
            for case_name in CASES:
                try:
                    collected.append(loader.loadTestsFromName(
                        case_name + '.' + name, module=CASES[case_name]))
                except AttributeError:
                    # This case does not define the requested test.
                    pass
        elif inspect.isclass(item) and issubclass(item, unittest.TestCase):
            collected.append(loader.loadTestsFromTestCase(item))
        elif isinstance(item, unittest.TestSuite):
            collected.append(item)
    if not collected:
        return
    suite = unittest.TestSuite(collected)
    tot = suite.countTestCases()
    environment = notebook_test_case.SEL_OPTIONS['environment']
    print('Running %d test%s in environment %s...' % (
        tot, '' if tot == 1 else 's', environment))
    if make_report:
        run_and_report(suite, environment=environment, **kwargs)
    else:
        run_suite(suite, **kwargs)
def run_suite(suite=all_tests, verbosity=2):
    """
    Run a test suite with the standard text test runner.

    For the SageNB test suite, this assumes a Selenium server or Grid
    hub is running with the options given in
    :mod:`sagenb.testing.notebook_test_case` or set by
    :func:`setup_tests`.

    INPUT:

    - ``suite`` - a TestSuite instance (default: all_tests); the test
      suite to run

    - ``verbosity`` - an integer (default: 2); how verbosely to report
      instantaneous test results

    EXAMPLES::

        sage: import sagenb.testing.run_tests as rt               # not tested
        sage: rt.run_suite()                                      # not tested
        sage: rt.run_suite(rt.test_worksheet.suite, verbosity=1)  # not tested
    """
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite)
def run_and_report(suite=all_tests, verbosity=2, report_filename='report.html',
                   title='Sage Notebook Tests',
                   description='Selenium test results',
                   open_viewer=True, **kwargs):
    """
    Runs a test suite and generates a HTML report with the outcome
    (pass, fail, or error) and output, including any tracebacks, for
    each test, plus overall statistics.

    For the SageNB test suite, this assumes a Selenium server or Grid
    hub is running with the options given in
    :mod:`sagenb.testing.notebook_test_case` or set by
    :func:`setup_tests`.

    INPUT:

    - ``suite`` - a TestSuite instance (default: all_tests); the test
      suite to run

    - ``verbosity`` - an integer (default: 2); how verbosely to report
      instantaneous test results

    - ``report_filename`` - a string (default: 'report.html'); the
      report's filename

    - ``title`` - a string (default: 'Sage Notebook Tests'); the
      report's title

    - ``description`` - a string (default: 'Selenium test results'); a
      description included near the beginning of the report

    - ``open_viewer`` - a boolean (default: True); whether to open
      the report in a web browser

    - ``kwargs`` - a dictionary; extra keyword arguments passed to the
      test runner's constructor

    EXAMPLES::

        sage: import sagenb.testing.run_tests as rt               # not tested
        sage: rt.run_and_report()                                 # not tested
        sage: rt.run_and_report(report_filename='test1.html')     # not tested
        sage: rt.run_and_report(rt.test_accounts.suite)           # not tested
    """
    from HTMLTestRunner import HTMLTestRunner
    # Bug fix: the report file was previously opened and never closed,
    # leaking the descriptor and risking buffered output being lost
    # before the browser reads the file.  Close it deterministically.
    report_fd = open(report_filename, 'w')
    try:
        runner = HTMLTestRunner(verbosity = verbosity, stream = report_fd,
                                title = title, description = description,
                                **kwargs)
        runner.run(suite)
    finally:
        report_fd.close()
    if open_viewer:
        import os, subprocess
        subprocess.Popen(SAGE_BROWSER + ' ' + os.path.abspath(report_filename),
                        shell=True)
# When executed as a script, run the full default test suite.
if __name__ == '__main__':
    run_suite()
|
migeruhito/sagenb
|
sagenb/testing/run_tests.py
|
Python
|
gpl-3.0
| 8,950
|
[
"VisIt"
] |
dadb60d65ebbd309ba3353879b5e7c35aa88ab656e7ab9ab519bfa8770351fd9
|
# Copyright 2004-2010 PyTom <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains support for state-machine controlled animations.
import renpy
import random
class State(object):
    """
    A single named state for use in a SMAnimation.

    Each state couples a name with the displayable shown while the
    animation is in (entering) that state, plus optional 'at'
    transforms and position properties.
    """

    def __init__(self, name, image, *atlist, **properties):
        """
        @param name: A string giving the name of this state.

        @param image: The displayable that is shown to the user while
        we are in (entering) this state. For convenience, this can
        also be a string or tuple, which is interpreted with Image.

        image should be None when this State is used with motion,
        to indicate that the image will be replaced with the child of
        the motion.

        @param atlist: A list of functions to call on the image. (In
        general, if something can be used in an at clause, it can be
        used here as well.)

        If any keyword arguments are given, they are used to construct a
        Position object, that modifies the position of the image.
        """
        # Coerce strings/tuples into a real displayable; None is kept as-is.
        if image and not isinstance(image, renpy.display.core.Displayable):
            image = renpy.easy.displayable(image)

        self.name = name
        self.image = image
        self.atlist = atlist
        self.properties = properties

    def add(self, sma):
        # Register this state with the owning SMAnimation, by name.
        sma.states[self.name] = self

    def get_image(self):
        # Apply each 'at' transform in order, then any position properties.
        result = self.image

        for transform in self.atlist:
            result = transform(result)

        if self.properties:
            result = renpy.display.layout.Position(result, **self.properties)

        return result

    def motion_copy(self, child):
        # A state with an explicit image keeps it; a None image is
        # replaced by the supplied child.  Position properties are not
        # carried over.
        image = self.image if self.image is not None else child
        return State(self.name, image, *self.atlist)
class Edge(object):
    """
    A directed, weighted edge between two states of a SMAnimation.
    """

    def __init__(self, old, delay, new, trans=None, prob=1):
        """
        @param old: The name (a string) of the state this transition
        leaves.

        @param delay: The number of seconds this transition takes.

        @param new: The name (a string) of the state this transition
        enters.

        @param trans: The transition used to show the image found in
        the new state, or None to show it immediately.  When used with
        an SMMotion, the transition should probably be move.

        @param prob: The number of copies of this edge that are
        registered.  Edges with larger prob values are proportionally
        more likely to be picked.  (Don't make this too large, as
        memory use is proportional to this value.)
        """
        self.old = old
        self.delay = delay
        self.new = new
        self.trans = trans
        self.prob = prob

    def add(self, sma):
        # Register prob copies of this edge, so that random.choice picks
        # it with the intended relative frequency.  With prob == 0 the
        # state's bucket is intentionally never created.
        for _ in range(self.prob):
            sma.edges.setdefault(self.old, []).append(self)
class SMAnimation(renpy.display.core.Displayable):
    """
    This creates a state-machine animation. Such an animation is
    created by randomly traversing the edges between states in a
    defined state machine. Each state corresponds to an image shown to
    the user, with the edges corresponding to the amount of time an
    image is shown, and the transition it is shown with.

    Images are shown, perhaps with a transition, when we are
    transitioning into a state containing that image.
    """
    def __init__(self, initial, *args, **properties):
        """
        @param initial: The name (a string) of the initial state we
        start in.

        @param showold: If the keyword parameter showold is True, then
        the old image is shown instead of the new image when in an
        edge.

        @param anim_timebase: If True, we use the animation
        timebase. If False, we use the displayable timebase.

        This accepts as additional arguments the anim.State and
        anim.Edge objects that are used to make up this state
        machine.
        """
        # Pop our own keyword arguments out of properties before they
        # are handed to the Displayable constructor.
        if 'delay' in properties:
            self.delay = properties['delay']
            del properties['delay']
        else:
            self.delay = None
        if 'showold' in properties:
            self.showold = properties['showold']
            del properties['showold']
        else:
            self.showold = False
        if 'anim_timebase' in properties:
            self.anim_timebase = properties['anim_timebase']
            del properties['anim_timebase']
        else:
            self.anim_timebase = True
        super(SMAnimation, self).__init__(**properties)
        # Remaining style properties, kept so __call__ can clone this
        # animation with the same configuration.
        self.properties = properties
        # The initial state.
        self.initial = initial
        # A map from state name to State object.
        self.states = { }
        # A map from state name to list of Edge objects.
        self.edges = { }
        # Let each State/Edge register itself in the maps above.
        for i in args:
            i.add(self)
        # The time at which the current edge started. If None, will be
        # set to st by render.
        self.edge_start = None
        # A cache for what the current edge looks like when rendered.
        self.edge_cache = None
        # The current edge.
        self.edge = None
        # The state we're in.
        self.state = None
    def visit(self):
        # Report every state's image as a child displayable.
        return [ i.image for i in self.states.itervalues() ]
    def pick_edge(self, state):
        """
        This randomly picks an edge out of the given state, if
        one exists. It updates self.edge if a transition has
        been selected, or sets it to None if none can be found. It
        also updates self.state to the name of the new state on the
        selected edge.
        """
        if state not in self.edges:
            self.edge = None
            return
        edges = self.edges[state]
        self.edge = random.choice(edges)
        self.state = self.edge.new
    def update_cache(self):
        """
        Places the correct Displayable into the edge cache, based on
        what is contained in the given edge. This takes into account
        the old and new states, and any transition that is present.
        """
        if self.edge.trans:
            im = self.edge.trans(old_widget=self.states[self.edge.old].get_image(),
                                 new_widget=self.states[self.edge.new].get_image())
        elif self.showold:
            im = self.states[self.edge.old].get_image()
        else:
            im = self.states[self.edge.new].get_image()
        self.edge_cache = im
    def get_placement(self):
        # Prefer the placement of whatever is actually being drawn.
        if self.edge_cache:
            return self.edge_cache.get_placement()
        if self.state:
            return self.states[self.state].get_image().get_placement()
        return super(SMAnimation, self).get_placement()
    def render(self, width, height, st, at):
        # Choose the timebase, then advance the state machine until the
        # current time falls within the current edge.
        if self.anim_timebase:
            t = at
        else:
            t = st
        # A time before edge_start means the animation restarted;
        # reinitialize from the initial state.
        if self.edge_start is None or t < self.edge_start:
            self.edge_start = t
            self.edge_cache = None
            self.pick_edge(self.initial)
        while self.edge and t > self.edge_start + self.edge.delay:
            self.edge_start += self.edge.delay
            self.edge_cache = None
            self.pick_edge(self.edge.new)
        # If edge is None, then we have a permanent, static picture. Deal
        # with that.
        if not self.edge:
            # NOTE(review): this passes st - edge_start while the edge
            # bookkeeping above uses t; when anim_timebase is True the
            # two timebases differ -- confirm st is intended here.
            im = renpy.display.render.render(self.states[self.state].get_image(),
                                             width, height,
                                             st - self.edge_start, at)
        # Otherwise, we have another edge.
        else:
            if not self.edge_cache:
                self.update_cache()
            im = renpy.display.render.render(self.edge_cache, width, height, t - self.edge_start, at)
            # Schedule a redraw for when the current edge expires.
            if not renpy.game.less_updates:
                renpy.display.render.redraw(self.edge_cache, self.edge.delay - (t - self.edge_start))
        iw, ih = im.get_size()
        rv = renpy.display.render.Render(iw, ih)
        rv.blit(im, (0, 0))
        return rv
    def __call__(self, child=None, new_widget=None, old_widget=None):
        """
        Used when this SMAnimation is used as a SMMotion. This creates
        a duplicate of the animation, with all states containing None
        as the image having that None replaced with the image that is provided here.
        """
        if child is None:
            child = new_widget
        args = [ ]
        for state in self.states.itervalues():
            args.append(state.motion_copy(child))
        for edges in self.edges.itervalues():
            args.extend(edges)
        return SMAnimation(self.initial, delay=self.delay, *args, **self.properties)
# class Animation(renpy.display.core.Displayable):
# """
# A Displayable that draws an animation, which is a series of images
# that are displayed with time delays between them.
# """
# def __init__(self, *args, **properties):
# """
# Odd (first, third, fifth, etc.) arguments to Animation are
# interpreted as image filenames, while even arguments are the
# time to delay between each image. If the number of arguments
# is odd, the animation will stop with the last image (well,
# actually delay for a year before looping). Otherwise, the
# animation will restart after the final delay time.
# @param anim_timebase: If True, the default, use the animation
# timebase. Otherwise, use the displayable timebase.
# """
# properties.setdefault('style', 'animation')
# self.anim_timebase = properties.pop('anim_timebase', True)
# super(Animation, self).__init__(**properties)
# self.images = [ ]
# self.delays = [ ]
# for i, arg in enumerate(args):
# if i % 2 == 0:
# self.images.append(renpy.easy.displayable(arg))
# else:
# self.delays.append(arg)
# if len(self.images) > len(self.delays):
# self.delays.append(365.25 * 86400.0) # One year, give or take.
# def render(self, width, height, st, at):
# if self.anim_timebase:
# t = at % sum(self.delays)
# else:
# t = st % sum(self.delays)
# for image, delay in zip(self.images, self.delays):
# if t < delay:
# renpy.display.render.redraw(self, delay - t)
# im = renpy.display.render.render(image, width, height, t, at)
# width, height = im.get_size()
# rv = renpy.display.render.Render(width, height)
# rv.blit(im, (0, 0))
# return rv
# else:
# t = t - delay
# def visit(self):
# return self.images
def Animation(*args, **kwargs):
    """
    Backwards-compatible wrapper that builds a TransitionAnimation
    from alternating image/delay arguments, by inserting a None
    (instant) transition after every delay.
    """
    expanded = [ ]
    for position, argument in enumerate(args):
        expanded.append(argument)
        # Odd positions hold delays; each delay is followed by a null
        # transition in the TransitionAnimation argument convention.
        if position % 2 == 1:
            expanded.append(None)
    return TransitionAnimation(*expanded, **kwargs)
class TransitionAnimation(renpy.display.core.Displayable):
    """
    A displayable that draws an animation with each frame separated
    by a transition.
    """
    def __init__(self, *args, **properties):
        """
        This takes arguments such that the 1st, 4th, 7th, ...
        arguments are displayables, the 2nd, 5th, 8th, ... arguments
        are times, and the 3rd, 6th, 9th, ... are transitions.

        This displays the first displayable for the given time, then
        transitions to the second displayable using the given
        transition, and shows it for the given time (the time of the
        transition is taken out of the time the frame is shown), and
        so on.

        The last argument may be a transition (in which case that
        transition is used to return to the first frame), or a
        displayable (which is shown forever).

        There is one keyword argument, apart from the style properties:

        @param anim_timebase: If True, the default, use the animation
        timebase. Otherwise, use the displayable timebase.
        """
        properties.setdefault('style', 'animation')
        self.anim_timebase = properties.pop('anim_timebase', True)
        super(TransitionAnimation, self).__init__(**properties)
        # Split the flat argument list into three parallel sequences.
        images = [ ]
        delays = [ ]
        transitions = [ ]
        for i, arg in enumerate(args):
            if i % 3 == 0:
                images.append(renpy.easy.displayable(arg))
            elif i % 3 == 1:
                delays.append(arg)
            else:
                transitions.append(arg)
        # A missing final delay means "hold the last frame forever".
        if len(images) > len(delays):
            delays.append(365.25 * 86400.0) # One year, give or take.
        if len(images) > len(transitions):
            transitions.append(None)
        self.images = images
        # Rotate so that index i pairs each frame with the frame and
        # transition that lead INTO it (frame 0 is entered from the last).
        self.prev_images = [ images[-1] ] + images[:-1]
        self.delays = delays
        self.transitions = [ transitions[-1] ] + transitions[:-1]
    def render(self, width, height, st, at):
        # Wrap time into the animation cycle, then find the active frame.
        if self.anim_timebase:
            orig_t = at
        else:
            orig_t = st
        t = orig_t % sum(self.delays)
        for image, prev, delay, trans in zip(self.images, self.prev_images, self.delays, self.transitions):
            if t < delay:
                if not renpy.game.less_updates:
                    renpy.display.render.redraw(self, delay - t)
                # Only use the incoming transition after the first pass
                # through the first frame.
                if trans and orig_t >= self.delays[0]:
                    image = trans(old_widget=prev, new_widget=image)
                im = renpy.display.render.render(image, width, height, t, at)
                width, height = im.get_size()
                rv = renpy.display.render.Render(width, height)
                rv.blit(im, (0, 0))
                return rv
            else:
                # Not this frame yet; shift t into the next frame's window.
                t = t - delay
    def visit(self):
        return self.images
class Blink(renpy.display.core.Displayable):
    """
    Blinks a displayable by cyclically ramping its alpha through the
    phases on - set - off - rise.
    """
    def __init__(self, image, on=0.5, off=0.5, rise=0.5, set=0.5,
                 high=1.0, low=0.0, offset=0.0, anim_timebase=False, **properties):
        """
        This takes as an argument an image or widget, and blinks that image
        by varying its alpha. The sequence of phases is
        on - set - off - rise - on - ... All times are given in seconds, all
        alphas are fractions between 0 and 1.

        @param image: The image or widget that will be blinked.

        @param on: The amount of time the widget spends on, at high alpha.

        @param off: The amount of time the widget spends off, at low alpha.

        @param rise: The amount of time the widget takes to ramp from low to high alpha.

        @param set: The amount of time the widget takes to ramp from high to low.

        @param high: The high alpha.

        @param low: The low alpha.

        @param offset: A time offset, in seconds. Use this to have a
        blink that does not start at the start of the on phase.

        @param anim_timebase: If True, use the animation timebase, if false, the displayable timebase.
        """
        super(Blink, self).__init__(**properties)
        self.image = renpy.easy.displayable(image)
        self.on = on
        self.off = off
        self.rise = rise
        self.set = set
        self.high = high
        self.low = low
        self.offset = offset
        self.anim_timebase = anim_timebase
        # Total length of one on/set/off/rise cycle, in seconds.
        self.cycle = on + set + off + rise
    def visit(self):
        return [ self.image ]
    def render(self, height, width, st, at):
        # NOTE(review): the first two parameters are named (height, width)
        # but callers pass positionally, so they actually receive
        # (width, height); behavior is correct, the names are swapped.
        if self.anim_timebase:
            t = at
        else:
            t = st
        # Position within the current cycle.
        time = (self.offset + t) % self.cycle
        alpha = self.high
        # Walk through the four phases.  The phases partition the cycle,
        # so exactly one branch fires and 'delay' is always assigned.
        # Each subtraction shifts 'time' into the next phase's frame.
        if 0 <= time < self.on:
            delay = self.on - time
            alpha = self.high
        time -= self.on
        if 0 <= time < self.set:
            # Ramping down: redraw continuously (delay 0).
            delay = 0
            frac = time / self.set
            alpha = self.low * frac + self.high * (1.0 - frac)
        time -= self.set
        if 0 <= time < self.off:
            delay = self.off - time
            alpha = self.low
        time -= self.off
        if 0 <= time < self.rise:
            # Ramping up: redraw continuously (delay 0).
            delay = 0
            frac = time / self.rise
            alpha = self.high * frac + self.low * (1.0 - frac)
        rend = renpy.display.render.render(self.image, height, width, st, at)
        w, h = rend.get_size()
        rv = renpy.display.render.Render(w, h)
        rv.blit(rend, (0, 0))
        rv.alpha = alpha
        if not renpy.game.less_updates:
            renpy.display.render.redraw(self, delay)
        return rv
def Filmstrip(image, framesize, gridsize, delay, frames=None, loop=True, **properties):
    """
    Build an Animation from a single image laid out as a grid of frames.

    Frames are cropped out of the grid left-to-right across the first
    row, then left-to-right across the second row, and so on, until
    the grid is exhausted or the requested number of frames is taken.

    @param image: The image that the frames must be taken from.

    @param framesize: A (width, height) tuple giving the size of
    each of the frames in the animation.

    @param gridsize: A (columns, rows) tuple giving the number of
    columns and rows in the grid.

    @param delay: The delay, in seconds, between frames.

    @param frames: The number of frames in this animation. If None,
    then this defaults to columns * rows frames, that is, taking
    every frame in the grid.

    @param loop: If True, loop at the end of the animation. If False,
    this performs the animation once, and then stops.

    Other keyword arguments are as for anim.SMAnimation.
    """
    frame_width, frame_height = framesize
    columns, rows = gridsize

    if frames is None:
        frames = columns * rows

    # How many frames to actually take: a positive request smaller than
    # the grid truncates; anything else takes the whole grid.
    total = columns * rows
    if 0 < frames < total:
        total = frames

    # Arguments handed to Animation: image, delay, image, delay, ...
    args = [ ]

    for index in range(total):
        row, column = divmod(index, columns)
        args.append(renpy.display.im.Crop(image,
                                          column * frame_width,
                                          row * frame_height,
                                          frame_width, frame_height))
        args.append(delay)

    # Dropping the trailing delay makes Animation hold the final frame
    # instead of looping.
    if not loop:
        args.pop()

    return Animation(*args, **properties)
|
MSEMJEJME/ReAlistair
|
renpy/display/anim.py
|
Python
|
gpl-2.0
| 20,638
|
[
"VisIt"
] |
ac21ccd5a522071748c6acfcedcf3fa44f57b68d2c9374e96f9b89cdf4f274ec
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
XYZ,
XYZ_bz2,
XYZ_mini,
)
class XYZBase(ParserBase):
    # Expectations shared by all XYZ topology parser tests.
    parser = mda.topology.XYZParser.XYZParser
    # XYZ files carry no residue or segment information, so everything
    # collapses into a single residue and a single segment.
    expected_n_residues = 1
    expected_n_segments = 1
    # Atom names are read from the file; types and masses are guessed.
    expected_attrs = ['names']
    guessed_attrs = ['types', 'masses']
class TestXYZMini(XYZBase):
    # Minimal three-atom XYZ fixture.
    filename = XYZ_mini
    expected_n_atoms = 3
class TestXYZParser(XYZBase):
    # Full-size plain-text XYZ fixture.
    filename = XYZ
    expected_n_atoms = 1284
class TestXYZParserBz2(TestXYZParser):
    # Same expectations as TestXYZParser, read through bz2 compression.
    filename = XYZ_bz2
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_xyz.py
|
Python
|
gpl-2.0
| 1,663
|
[
"MDAnalysis"
] |
859d768da54d220e819c5fd09d9c51067641c25375d2ace5b7aa6e63d2aa55d7
|
import ast
from .base import BaseAnalyzer, Result, DeprecatedCodeVisitor
DESCRIPTION = """
``{name}`` function has been deprecated in Django 1.3 and will be removed in
1.5.
"""
class GenericViewsVisitor(DeprecatedCodeVisitor):
    """
    AST visitor that flags use of the deprecated function-based
    generic views.
    """

    # The visitor wants each providing module mapped to its deprecated
    # view names, plus every fully-qualified view name mapped to None.
    # Build that table from the module/view pairs below.
    interesting = {}
    for _module, _views in (
            ('django.views.generic.simple',
             ['direct_to_template', 'redirect_to']),
            ('django.views.generic.date_based',
             ['archive_index', 'archive_year', 'archive_month',
              'archive_week', 'archive_day', 'archive_today',
              'archive_detail']),
            ('django.views.generic.list_detail',
             ['object_list', 'object_detail']),
            ('django.views.generic.create_update',
             ['create_object', 'update_object', 'delete_object'])):
        interesting[_module] = _views
        for _view in _views:
            interesting[_module + '.' + _view] = None
    del _module, _views, _view
class GenericViewsAnalyzer(BaseAnalyzer):
    """
    Analyzer that reports usages of deprecated function-based generic
    views, one Result per finding.
    """

    def analyze_file(self, filepath, code):
        # Only parsed modules can be visited; silently skip anything else.
        if not isinstance(code, ast.AST):
            return

        visitor = GenericViewsVisitor()
        visitor.visit(code)

        for name, node, start, stop in visitor.get_found():
            finding = Result(
                description=DESCRIPTION.format(name=name),
                path=filepath,
                line=start,
            )
            # Attach the offending source lines, flagging the important ones.
            for lineno, important, text in self.get_file_lines(filepath,
                                                               start, stop):
                finding.source.add_line(lineno, text, important)
            yield finding
|
alfredhq/djlint
|
djlint/analyzers/generic_views.py
|
Python
|
isc
| 2,239
|
[
"VisIt"
] |
ab9ae30d2c2fb218941532090a6e7baf0029e9acf2233d90eb8b6a47c2e6f124
|
#!/usr/bin/python
#=============================================================================================
# example files for reading in MD simulation files and performing
# statistical analyses according to manuscript "Simple tests for
# validity when sampling from thermodynamic ensembles", Michael
# R. Shirts.
#
# COPYRIGHT NOTICE
#
# Written by Michael R. Shirts <mrshirts@gmail.com>.
#
# Copyright (c) 2012 The University of Virginia. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# =============================================================================================
#
#===================================================================================================
# IMPORTS
#===================================================================================================
import numpy
import timeseries
import checkensemble
import optparse, sys
from optparse import OptionParser
import readmdfiles
# ---------------------------------------------------------------------------
# Command-line interface.  Two data files (one per thermodynamic state) are
# required; everything else has a usable default.
# ---------------------------------------------------------------------------
parser = OptionParser()
parser.add_option("-f", "--files", dest="datafiles", nargs=2,
                  help="the two files of different temperature for analysis")
parser.add_option("-d", "--directory", dest="datafile_directory", default='./',
                  help="the directory the data files are is in")
parser.add_option("-k", "--nolikelihood", dest="bMaxLikelihood", action="store_false", default=True,
                  help="Don't run maximum likelihood analysis [default = run this analysis]")
parser.add_option("-l", "--nolinearfit", dest="bLinearFit", action="store_false", default=True,
                  help="Don't run linear fit analysis [default = run this analysis]")
parser.add_option("-n", "--nononlinearfit", dest="bNonLinearFit", action="store_false", default=True,
                  help="Don't run nonlinear fit analysis [default = run this analysis]")
parser.add_option("-t", "--temperature", nargs=2, dest="T_k", type="float", default=[0, 0],
                  help="low and high temperatures, [default = %default]")
parser.add_option("-p", "--pressure", nargs=2, dest="P_k", type="float", default=[0, 0],
                  help="low and high pressures, [default = %default]")
parser.add_option("-m", "--chem.potential", nargs=2, dest="mu_k", type="float", default=[0, 0],
                  help="low and high chemical potentials, [default = %default]")
parser.add_option("-e", "--energytype", dest="type", default="total",
                  help="the type of energy that is being analyzed [default = %default]")
parser.add_option("-b", "--nboot", dest="nboots", type="int", default=200,
                  help="number of bootstrap samples performed [default = %default]")
parser.add_option("-i", "--nbins", dest="nbins", type="int", default=30,
                  help="number of bins for bootstrapping [default = %default]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                  help="more verbosity")
parser.add_option("-g", "--figurename", dest="figname", default='figure.pdf',
                  help="name for the figure")
parser.add_option("-s", "--seed", dest="seed", type="int", default=None,
                  help="random seed for bootstrap sampling")
parser.add_option("-c", "--efficiency", nargs=2, dest="efficiency", type="float", default=None,
                  help="statistical efficiency to overwrite the calculated statistical efficiency")
parser.add_option("-u", "--useefficiency", dest="useg", type="string", default='subsample',
                  help="calculate the efficiency by scaling the uncertainty, or by subsampling the input data")
parser.add_option("--filetype", dest="filetype", type="string", default='flatfile',
                  help="specified the type of the file analyzed. options are gromacs .xvg, charmm output, desmond .ene, and flat files")
parser.add_option("--kB", dest="kB", type="float", default=1.3806488*6.0221415/1000.0,
                  help="Boltzmann's constant in the applicable units for this sytem")
# Default kB is Boltzmann's constant (kJ/mol/K).
(options, args) = parser.parse_args()
# File formats this script knows how to parse (see readmdfiles below).
filetypes_supported = ['flatfile','gromacs','charmm','desmond']
if options.datafiles is None:
    print "\nQuitting: No files were input!\n"
    sys.exit()
# NOTE(review): 'type' shadows the builtin of the same name for the rest of
# the script; kept as-is since later code and readmdfiles rely on the name.
type = options.type
# Ensemble requirements per energy type: energy only, energy+volume (NPT),
# or energy+particle number (grand canonical).
onlyE = ['potential', 'kinetic', 'total']
requireV = ['enthalpy', 'volume', 'jointEV']
requireN = ['helmholtz', 'number', 'jointEN']
alltypes = onlyE + requireV + requireN
if not (type in alltypes):
    print "type of energy %s isn't defined!" % (type)
    print "Must be one of ",
    print alltypes
    sys.exit()
# Map the requested energy type onto the checkensemble analysis mode.
if type in onlyE:
    analysis_type = 'dbeta-constV'
elif (type == 'enthalpy'):
    analysis_type = 'dbeta-constP'
elif (type == 'volume'):
    analysis_type = 'dpressure-constB'
elif (type == 'jointEV'):
    analysis_type = 'dbeta-dpressure'
elif (type == 'helmholtz'):
    analysis_type = 'dbeta-constmu'
elif (type == 'number'):
    analysis_type = 'dmu-constB'
elif (type == 'jointEN'):
    analysis_type = 'dbeta-dmu'
else:
    # Unreachable in practice: every member of alltypes is handled above.
    print "analysis type %s not defined: I'll go with total energy" % (type)
    analysis_type = 'dbeta-constV'
if (not(options.useg == 'scale' or options.useg == 'subsample')):
    print "Error: for -u, only options \'scale\' and \'subsample\' allowed"
    sys.exit()
#===================================================================================================
# CONSTANTS
#===================================================================================================
verbose = options.verbose
T_k = numpy.array(options.T_k) #T_k = numpy.array([132.915071475571,137.138128524429]) # temperatures
P_k = numpy.array(options.P_k) #P_k = numpy.array([1.0, 21.0]) # pressures
mu_k = numpy.array(options.mu_k) #mu_k = numpy.array([1.0, 21.0]) # chemical potential
kB = options.kB
# Labels for the two states, ordered low then high.
names = ['down','up']
nboots = options.nboots
nbins = options.nbins
bMaxLikelihood = options.bMaxLikelihood
bNonLinearFit = options.bNonLinearFit
bLinearFit = options.bLinearFit
figname = options.figname
if not (options.filetype in filetypes_supported):
    print "Error: for -filetype, I currently only know about filetypes",
    print filetypes_supported
    sys.exit()
if type[0:5] == 'joint':
bLinearFit = False
bNonLinearFit = False
bMaxLikelhood = True
print "For joint simulations, can only run maximum likelihood, overwriting other options"
# Echo the effective configuration when asked to be verbose.
if (verbose):
    print "verbosity is %s" % (str(verbose))
    print "Energy type is %s" % (type)
    print "\'%s\' temperature is %f" % (names[0],T_k[0])
    print "\'%s\' temperature is %f" % (names[1],T_k[1])
    if type in requireV:
        print "\'%s\' pressure is %f" % (names[0],P_k[0])
        print "\'%s\' pressure is %f" % (names[1],P_k[1])
    if type in requireN:
        print "\'%s\' chemical potential is %f" % (names[0],mu_k[0])
        print "\'%s\' chemical potential is %f" % (names[1],mu_k[1])
    print "Number of bootstraps is %d" % (nboots)
    print "Number of bins (not used for maximum likelihood) is %d" % (nbins)
    if (bMaxLikelihood):
        print "Generating maximum likelihood statistics"
    else:
        print "Not generating maximum likelihood statistics"
    if (bLinearFit):
        print "Generating linear fit statistics"
    else:
        print "Not generating linear fit statistics"
    if (bNonLinearFit):
        print "Generating nonlinear fit statistics"
    else:
        print "Not generating nonlinear fit statistics"
    print "Figures will be named %s" % (figname)
# Shouldn't need to modify below this for standard usage
# ------------------------
K = 2;
kJperkcal = 4.184 # unit conversion factor
nm3perA3 = 0.001
N_k = numpy.zeros([K],int) # number of samples at each state
# First pass: count lines in each file to size the data arrays.
N_size = numpy.zeros(K,int)
filenames = []
for k,T in enumerate(T_k):
    filename = options.datafile_directory + '/' + options.datafiles[k]
    filenames.append(filename)
    print "checking size of \'%s\' file %s..." % (names[k],filenames[k])
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
    N_size[k] = len(lines)
N_max = numpy.max(N_size)
U_kn = numpy.zeros([K,N_max], dtype=numpy.float64) # U_kn[k,n] is the energy of the sample k,n
V_kn = numpy.zeros([K,N_max], dtype=numpy.float64) # V_kn[k,n] is the volume of the sample k,n
N_kn = numpy.zeros([K,N_max], dtype=int) # N_kn[k,n] is the particle number of the sample k,n
# Second pass: parse each file with the format-specific reader.  The
# gromacs/charmm/desmond readers do not provide particle numbers, so
# N_kn is dropped for them; charmm/desmond energies and volumes are
# converted from kcal/mol and A^3 into kJ/mol and nm^3.
for k in range(K):
    # Read contents of file into memory.
    print "Reading %s..." % filenames[k]
    infile = open(filenames[k], 'r')
    lines = infile.readlines()
    infile.close()
    if (options.filetype == 'flatfile'): # assumes kJ/mol energies, nm3 volumes
        U_kn[k,:],V_kn[k,:],N_kn[k,:], N_k[k] = readmdfiles.read_flatfile(lines,type,N_max)
        # will need to specify KB for toy models.
    elif (options.filetype == 'gromacs'):
        U_kn[k,:],V_kn[k,:],N_k[k] = readmdfiles.read_gromacs(lines,type,N_max)
        N_kn = None
    elif (options.filetype == 'charmm'):
        U_kn[k,:],V_kn[k,:],N_k[k] = readmdfiles.read_charmm(lines,type,N_max)
        U_kn[k,:] *= kJperkcal
        V_kn[k,:] *= nm3perA3
        N_kn = None
    elif (options.filetype == 'desmond'):
        U_kn[k,:],V_kn[k,:],N_k[k] = readmdfiles.read_desmond(lines,type,N_max)
        U_kn[k,:] *= kJperkcal
        V_kn[k,:] *= nm3perA3
        N_kn = None
    else:
        print "The file type %s isn't defined!" % (options.filetype)
        sys.exit()
# compute correlation times for the data
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k.
print "Now determining correlation time"
if (options.efficiency is None):
    g = readmdfiles.getefficiency(N_k,U_kn,V_kn,N_kn,type)
else:
    g = numpy.ones(2);
    print "statistical inefficiencies taken from input options and are %.3f and %.3f steps" % (options.efficiency[0],options.efficiency[1])
if (options.useg == 'subsample'):
    readmdfiles.subsample(N_k,U_kn,V_kn,N_kn,g,type)
else:
    print "statistical efficiencies used to scale the statistical uncertained determined from all data"
figname = options.figname
title = options.figname
# Hand everything to checkensemble for the actual statistical tests.
checkensemble.ProbabilityAnalysis(N_k,type=analysis_type,T_k=T_k,P_k=P_k,mu_k=mu_k,U_kn=U_kn,V_kn=V_kn,N_kn=N_kn,title=title,figname=figname,nbins=nbins,reptype='bootstrap',g=g,nboots=nboots,bMaxwell=(type=='kinetic'),bLinearFit=bLinearFit,bNonLinearFit=bNonLinearFit,bMaxLikelihood=bMaxLikelihood,seed=options.seed,kB=kB)
|
shirtsgroup/checkensemble
|
examples/analyze-md.py
|
Python
|
gpl-2.0
| 11,159
|
[
"CHARMM",
"Desmond",
"Gromacs"
] |
7eefba62262c1604c0b21f3cc63e7d3c7b4bf0cfda23e07597ec1f6f1f3a66ce
|
#!/usr/bin/env python
# encoding: utf-8
"""
Tests that stresses are calculated correctly by Asap
Name: testStress.py
Description: Part of the Asap test suite. Tests that stresses are
calculated correctly by calculating various elastic constants from
strain and stress and comparing them with the same constants
calculated using strains and energies. Can also be imported as a
module, and used to test advanced calculation methods (QC,
parallel).
Usage: python testStress.py
Expected result: Some elastic constants for Copper and Silver,
followed by the text 'ALL TESTS SUCCEEDED'.
The elastic constants are calculated by straining the crystal in
various modes, and fitting to the energies or the stresses.
Generally, the energies seems to be most sensitive to numerical noise,
and requires a rather large strain interval (1% or above), whereas the
stresses are much less sensitive to this. On the other hand,
unlinearities influence the stress fits for large strain intervals. A
strain interval of [-1%, 1%] is a good compromise, where both methods
work.
C11 and C12 calculated 'directly', i.e. in uniaxial strain, is
sensitive to the strain interval when using the energy to fit C11.
Fitting to the stresses work much better.
C11 and C12 can be calculated in an alternative way using a
volume-conserving deformation and fitting to the energies.
All of the above-mentioned calculations are performed.
"""
from asap3 import *
from ase import data, Atoms
from ase.lattice.cubic import Diamond
from ase.lattice.hexagonal import Graphite
#from asap3.md.verlet import VelocityVerlet
#from asap3.md.langevin import Langevin
from asap3.testtools import ReportTest
import numpy as np
# Reference elastic constants for each test material.  Every "book*" entry
# is a (literature, expected) pair in GPa: the first number is the quoted
# experimental/textbook value, the second is the value this test suite
# actually checks the potential against.  'symbol' is the chemical symbol
# used to build the lattice and is popped before the kwargs call below.
book = {}
book['Diamond'] = {"bookbulk": (442.0, 441.4),
                   "bookc11": (1079.0, 1068.9),
                   "bookc12": (124.0, 131.5),
                   "bookc44": (578.0, 735.8),
                   'symbol': 'C'}
# Graphite reference data from:
# Alexey Bosak and Michael Krisch
# European Synchrotron Radiation Facility, BP 220, F-38043 Grenoble Cedex, France
# Marcel Mohr, Janina Maultzsch, and Christian Thomsen
# Institut für Festkörperphysik, Technische Universität Berlin, Hardenbergstrasse 36, 10623 Berlin, Germany
# Received 22 November 2006; revised 11 January 2007; published 30 April 2007
# The five independent elastic moduli of single-crystalline graphite
# are determined using inelastic x-ray scattering. At room temperature
# the elastic moduli are, in units of GPa, C11=1109, C12=139, C13=0,
# C33=38.7, and C44=4.95. Our experimental results are compared with
# predictions of ab initio calculations and previously reported
# incomplete and contradictory data sets. We obtain an upper limit of
# 1.1 TPa for the on-axis Young's modulus of homogeneous carbon
# nanotube, thus providing important constraints for further
# theoretical advances and quantitative input to model elasticity in
# graphite nanotubes.
# URL:
# http://link.aps.org.globalproxy.cvt.dk/doi/10.1103/PhysRevB.75.153408
# DOI:
# 10.1103/PhysRevB.75.153408
book['Graphite'] = {"bookbulk": (134.3, 140.77),
                    "bookc11": (1109, 172.75),
                    "bookc12": (139, 124.32),
                    "bookc13": (0, 0),
                    "bookc33": (38.7, 38.7),
                    "bookc44": (75.7, 81.17),
                    'symbol': 'C'}
book['Silicon'] = {"bookbulk": (100.0, 97.8),
                   "bookc11": (124.0, 142.6),
                   "bookc12": (93.7, 75.5),
                   "bookc44": (46.12, 118.9),
                   'symbol': 'Si'}
book['Germanium'] = {"bookbulk": (103.8, 99.27),
                     "bookc11": (124.0, 124.45),
                     "bookc12": (93.7, 86.27),
                     "bookc44": (46.12, 53.44),
                     'symbol': 'Ge'}
# Strains applied when fitting the elastic constants.  Per the module
# docstring, a small symmetric interval is a compromise between numerical
# noise (dominant at small strains) and anharmonic effects (dominant at
# large strains).  NOTE: an earlier, immediately shadowed definition with
# a 0.01 prefactor was dead code and has been removed.
defaultstrains = 0.005 * np.array((-1, -0.75, -0.5, -0.25, 0.0,
                                   0.25, 0.5, 0.75, 1.0))
def polynomialLeastSquaresFit(parameters, data, max_iterations=None,
                              stopping_limit = 0.0001):
    """Least-squares fit of a polynomial to (x, y) data.

    Drop-in replacement for the similar function in
    Scientific.Functions.LeastSquares.

    Parameters:
    parameters: sequence of initial coefficient guesses.  Only its length
        matters here: a polynomial with len(parameters) coefficients,
        i.e. of degree len(parameters) - 1, is fitted.
    data: (N, 2) array-like of (x, y) points.
    max_iterations, stopping_limit: accepted for interface compatibility
        with the Scientific version; unused by the np.polyfit backend.

    Returns:
    (coefficients, None) with the coefficients ordered from the constant
    term upwards (lowest degree first), matching the Scientific
    convention.  The second element stands in for the chi-square value,
    which np.polyfit does not provide.
    """
    data = np.asarray(data)
    # np.polyfit takes the polynomial *degree*: len(parameters)
    # coefficients correspond to degree len(parameters) - 1.  (The
    # original passed len(parameters) itself, fitting one degree higher
    # than the number of requested coefficients implies.)
    degree = len(parameters) - 1
    # polyfit returns highest-degree-first; reverse to constant-first.
    return (np.polyfit(data[:, 0], data[:, 1], degree)[::-1], None)
def makefits(atoms, strains, indices, shear=0):
    """Deform the cell through a set of strains and fit energy vs. strain.

    atoms: the Atoms object to deform; its original cell is restored
        before returning.
    strains: iterable of dimensionless strain magnitudes.
    indices: which strain components to apply.
        With shear=0 each entry is ((i, j), weight):
            scaling[i, j] += weight * epsilon, applied multiplicatively
            to the cell.
        With shear=1 each entry is (target, source, weight):
            adjustment[target] += weight * epsilon * basis[source],
            applied additively to the cell.
    shear: 0 for diagonal (normal) strain scaling, 1 for shear.

    Returns the (coefficients, None) tuple from polynomialLeastSquaresFit
    for the energy density (energy/volume) as a function of strain.
    """
    energies = []
    basis = atoms.get_cell()
    vol = np.linalg.det(basis)
    for epsilon in strains:
        if shear:
            # Additive adjustment to the cell vectors.
            # (float replaces np.float, a removed alias of the builtin.)
            adjustment = np.zeros((3, 3), float)
            for idx in indices:
                adjustment[idx[0]] += idx[2] * epsilon * basis[idx[1]]
            atoms.set_cell(adjustment + basis, scale_atoms=True)
        else:
            # Multiplicative scaling of selected cell components.
            scaling = np.ones((3, 3), float)
            for idx in indices:
                scaling[idx[0]] += idx[1] * epsilon
            atoms.set_cell(scaling * basis, scale_atoms=True)
        energy = atoms.get_potential_energy()
        energies.append((epsilon, energy / vol))
    # Restore the undeformed cell.
    atoms.set_cell(basis, scale_atoms=True)
    energies = np.array(energies)
    # Quadratic fit: three parameters -> constant, linear and quadratic
    # coefficients of the energy density.
    energyfit = polynomialLeastSquaresFit((0.0, 0.0, 0.0), energies)
    return energyfit
def findlatticeconst(atoms, latticeconstant):
    """Adjust the volume so the atoms have their lowest energy.

    Runs five passes: each fits the energy density against small
    hydrostatic strains, finds the strain at the minimum of the quadratic
    fit, and rescales the cell accordingly.  The atoms object is modified
    in place; the updated lattice constant is only printed, not returned.
    """
    basis = atoms.get_cell()
    # Small hydrostatic strains (up to +/-0.2%) around the current volume.
    strains = 0.01 * np.array((-0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1,
                               0.15, 0.2))
    for i in range(5):
        # Fit E/V to a quadratic in hydrostatic strain (all three
        # diagonal components strained equally).
        energyfit = makefits(atoms, strains,
                             (((0,0), 1), ((1,1), 1), ((2,2),1)))
        # Negated energy coefficients; the linear/quadratic terms give the
        # vertex of the parabola, i.e. the volume strain at zero pressure.
        pressurefit = -energyfit[0]
        dilation = -pressurefit[1]/(2*pressurefit[2])
        print "Optimizing lattice constant:", latticeconstant, "->", latticeconstant*(1+dilation/3)
        # Volume dilation is split evenly over the three cell directions.
        latticeconstant = latticeconstant*(1+dilation/3)
        basis = (1+dilation/3) * basis
        atoms.set_cell(basis, scale_atoms=True)
def elasticconstants(atoms, name, bookbulk, bookc11, bookc12, bookc44,
                     bookc13=None, bookc33=None,
                     fitfact=1.0, fitfact2=1.0):
    """Check the elastic constants.

    Fits energy-density-vs-strain curves (via makefits) for hydrostatic,
    uniaxial, volume-conserving and shear deformations, derives the bulk
    modulus and C11/C12/C33/C44, and compares them via ReportTest.  Each
    book* argument is a (literature, expected) pair; the second element is
    the test target.  fitfact/fitfact2 scale the accepted tolerances.
    For cubic crystals C13/C33 default to C12/C11.
    """
    if bookc13 is None:
        bookc13 = bookc12
    if bookc33 is None:
        bookc33 = bookc11
    # Hydrostatic strain in all three directions -> bulk modulus.
    energyfit = makefits(atoms, defaultstrains,
                         (((0,0), 1), ((1,1), 1), ((2,2),1)))
    bm = 2.0/9.0 * energyfit[0][2] / units.GPa
    print ""
    print "Calculation for", name
    print " Bulk modulus from energies:", bm
    print " Textbook value:", bookbulk[0]
    print ""
    ReportTest("Bulk modulus (%s, energies)" % name, bm, bookbulk[1],
               0.1*fitfact)
    # Uniaxial strain along x -> C11.
    energyfit = makefits(atoms, defaultstrains, (((0,0),1),))
    c11 = 2.0 * energyfit[0][2] / units.GPa
    # Volume-conserving deformation (stretch x, shrink y and z) -> C11 - C12.
    energyfit = makefits(atoms, defaultstrains,
                         (((0,0),1), ((1,1), -0.5), ((2,2), -0.5)))
    # C11 - C12
    c11mc12 = 4.0/3.0 * energyfit[0][2] / units.GPa
    c12 = c11 - c11mc12
    print ""
    print "Calculation for", name
    print " C_11 from energies:", c11
    print " C_12 from energies:", c12
    print " Textbook values: C_11 = %.1f; C_12 = %.1f" % (bookc11[0],
                                                          bookc12[0])
    # B = (C11 + 2 C12)/3
    altc11 = (3*bm + 2*c11mc12) / 3.0
    altc12 = (3*bm - c11mc12) / 3.0
    print " C_11 from alternative energies:", altc11
    print " C_12 from alternative energies:", altc12
    print " Bulk modulus from C_11 and C_12:", (altc11 + 2 * altc12) / 3.0
    # Uniaxial strain along z -> C33.
    energyfit = makefits(atoms, defaultstrains, (((2,2),1),))
    c33 = 2.0 * energyfit[0][2] / units.GPa
    # NOTE(review): the next two assignments recompute c11mc12 and c12
    # from the *c33* fit, and the print below reports c11 under the C_33
    # label.  This looks like a copy-paste slip from the C_11 section
    # above; it corrupts c12 before the "C12 from energies" ReportTest.
    # c33 itself is checked correctly below.  Confirm before relying on
    # the C12 results here.
    # C11 - C12
    c11mc12 = 4.0/3.0 * energyfit[0][2] / units.GPa
    c12 = c11 - c11mc12
    print " C_33 from energies:", c11
    print " Textbook values: C_33 = %.1f" % (bookc33[0],)
    print ""
    ReportTest("C11 from energies", c11, bookc11[1], 0.1*fitfact)
    ReportTest("C11 from alt. energies", altc11, c11, 3.0*fitfact)
    ReportTest("C12 from energies", c12, bookc12[1], 0.5*fitfact)
    ReportTest("C12 from alt. energies", altc12, c12, 3.0*fitfact)
    ReportTest("C33 from energies", c33, bookc33[1], 0.1*fitfact)
    print ""
    # Shear deformation in the yz plane -> C44.
    energyfit = makefits(atoms, defaultstrains,
                         (((2,1), (2,2), 1), ((1,2), (1,1), 1)), shear=1)
    c44 = 0.5 * energyfit[0][2] / units.GPa
    print ""
    print "Calculation for", name
    print " C_44 from energies:", c44
    print " Textbook value:", bookc44[0]
    print ""
    ReportTest("C44 from energies", c44, bookc44[1], 0.1*fitfact)
    print ""
    print "Testing other shear modes:"
    # Equivalent shears in the xz and xy planes; for a cubic crystal these
    # must reproduce the same C44 (looser tolerance for the first, which
    # mixes the inequivalent c axis for graphite).
    energyfit = makefits(atoms, defaultstrains,
                         (((2,0), (2,2), 1), ((0,2), (0,0), 1)), shear=1)
    c44x = 0.5 * energyfit[0][2] / units.GPa
    ReportTest("C44(alt) from energies", c44x, c44, 0.2*fitfact2)
    energyfit = makefits(atoms, defaultstrains,
                         (((1,0), (1,1), 1), ((0,1), (0,0), 1)), shear=1)
    c44x = 0.5 * energyfit[0][2] / units.GPa
    ReportTest("C44(alt) from energies", c44x, c44, 0.1*fitfact)
    print ""
print_version(1)
# Main driver: build each test lattice, relax its volume, then check the
# elastic constants.  Germanium reference data exists in 'book' but is
# deliberately not exercised here.
for element in ('Diamond', 'Graphite', 'Silicon'):
    symbol = book[element]['symbol']
    print("*** Running test on %s ***" % element)
    z = data.atomic_numbers[symbol]
    if element == 'Graphite':
        latconst = 2.46
        # Fixed: this was assigned to a misspelled name ('intial'), so the
        # ReportTest below raised NameError on 'initial'.
        initial = Graphite(directions=[[2,-1,-1,0], [0,1,-1,0], [0,0,0,1]],
                           symbol=symbol, size=(7,7,5),
                           latticeconstant = {'a':2.46, 'c':6.71})
        ReportTest("Number of atoms", len(initial), 7*7*5*8, 0)
    else:
        latconst = data.reference_states[z]['a']
        initial = Diamond(directions=[[1,0,0],[0,1,0],[0,0,1]],
                          symbol=symbol, size=(6,6,6))
        ReportTest("Number of atoms", len(initial), 6*6*6*8, 0)
    atoms = Atoms(initial)
    atoms.set_calculator(BrennerPotential())
    # Zero the momenta so only potential energy enters the fits.
    # (float replaces np.float, a removed alias of the builtin.)
    atoms.set_momenta(np.zeros((len(atoms),3), float))
    findlatticeconst(atoms, latconst)
    # 'symbol' is not an elasticconstants() argument; drop it before
    # forwarding the remaining book data as keyword arguments.
    del book[element]['symbol']
    elasticconstants(atoms, element, **book[element])
ReportTest.Summary()
|
auag92/n2dm
|
Asap-3.8.4/Test/BrennerStress.py
|
Python
|
mit
| 10,820
|
[
"ASE",
"CRYSTAL"
] |
d8facc7d363fb08489285902c886736514bf4c9af558810c352db10256041110
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
import os
import mdtraj
from numpy.testing import Tester
class MDTrajTester(Tester):
    """numpy Tester subclass that augments the system report with the
    versions and locations of MDTraj and its optional I/O back-ends."""

    def _show_system_info(self):
        # Base report first (python/numpy details), then MDTraj specifics.
        super(MDTrajTester, self)._show_system_info()
        print('mdtraj version %s' % mdtraj.version.version)
        print('mdtraj is installed in %s' % os.path.dirname(mdtraj.__file__))
        # Optional dependencies: report their versions when importable,
        # otherwise note their absence.
        try:
            import tables
        except ImportError:
            print('tables is not installed')
        else:
            print('tables version %s' % tables.__version__)
            print('tables hdf5 version %s' % tables.hdf5Version)
        try:
            import netCDF4
        except ImportError:
            print('netCDF4 not installed')
        else:
            print('netCDF4 version %s' % netCDF4.__version__)
            print('netCDF4 lib version %s' % netCDF4.getlibversion())
|
kyleabeauchamp/mdtraj
|
mdtraj/testing/nosetester.py
|
Python
|
lgpl-2.1
| 1,860
|
[
"MDTraj"
] |
cc4293ea4d74303d5d62955f4ea7fddc9ba1a794ea91928d57f593b6c18f12a0
|
#!/usr/bin/python
########################################################################
# 19 May 2014
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import subprocess
import sys, re, os
import ConfigParser
import itertools
import argparse
from collections import defaultdict
from pyrnatools.tools import gfold, deseq2
from multiprocessing import Pool
import tempfile
def annotate_sam(bam_file, gtf_file, stranded, outdir):
	"""Run htseq-count on one alignment file.

	Writes per-feature counts to <outdir>/<basename>.count, where
	basename swaps the .bam/.sam extension for .count.  'stranded' is
	passed straight through to htseq-count's --stranded option.
	"""
	print "==> Counting sam file...\n"
	if bam_file.endswith(".bam"):
		name = os.path.basename(bam_file)
		count_file = re.sub(".bam$", ".count", name)
		command = "htseq-count --stranded={} --quiet -f bam {} {} > {}/{}".format(stranded, bam_file, gtf_file, outdir, count_file)
	else:
		name = os.path.basename(bam_file)
		count_file = re.sub(".sam$", ".count", name) #Just in case supplied file is sam!
		command = "htseq-count --stranded={} --quiet -f sam {} {} > {}/{}".format(stranded, bam_file, gtf_file, outdir, count_file)
	# shell=True is required for the '>' redirection in the command string.
	subprocess.call(command, shell=True)
def join_counts(idict, outdir):
	"""Merge per-sample htseq-count files into one combined table.

	idict: dict whose keys are the input bam/sam paths; the count file
	    for each is derived from its basename (as in annotate_sam).
	outdir: directory holding the .count files.  The merged table is
	    written there as combined_counts.tsv: a header row of sample
	    names, then one row per feature with the per-sample counts in
	    sorted-sample order.  htseq-count summary rows (IDs starting
	    with '__') are dropped.
	"""
	merged = defaultdict(list)
	out = open("{}/combined_counts.tsv".format(outdir), "w")
	out.write("ID")
	for sample in sorted(idict):
		base = os.path.basename(sample)
		if sample.endswith(".bam"):
			count_name = re.sub(".bam$", ".count", base)
		else:
			count_name = re.sub(".sam$", ".count", base)
		out.write("\t{}".format(sample))
		handle = open("{}/{}".format(outdir, count_name))
		for row in handle:
			fields = row.rstrip().split("\t")
			# Skip the trailing summary lines (e.g. __no_feature).
			if not fields[0].startswith("__"):
				merged[fields[0]].append(fields[1])
		handle.close()
	out.write("\n")
	for feature in sorted(merged):
		out.write(feature + "\t" + "\t".join(merged[feature]) + "\n")
	out.close()
def featurecounts(conditions, threads, gtf_file, stranded, paired, outfile, bam=None):
	"""Run the featureCounts program over one or many bam files.

	conditions: dict whose keys are bam paths (used when bam is None).
	threads: number of featureCounts threads (-T).
	gtf_file: annotation GTF (-a).
	stranded: "yes" / "no" / "reverse", mapped to -s 1 / 0 / 2.
	paired: if true, adds -p (fragment counting for paired-end data).
	outfile: output counts file (-o).
	bam: optional single bam file; otherwise all keys of conditions,
	    sorted, are counted together.
	"""
	# featureCounts -s: 0 (unstranded), 1 (stranded), 2 (reversely stranded)
	command = "featureCounts -a {} -T {} -o {}".format(gtf_file, threads, outfile)
	command = command.split()
	# Each flag and its value must be separate argv elements.  The
	# original appended strings with embedded/leading spaces (" -p",
	# " -s 1"), which subprocess passed to featureCounts as single
	# malformed arguments.
	if paired:
		command.append("-p")
	if stranded == "yes":
		command.extend(["-s", "1"])
	elif stranded == "no":
		command.extend(["-s", "0"])
	elif stranded == "reverse":
		command.extend(["-s", "2"])
	if bam:
		command.append(bam)
	else:
		command.extend(sorted(list(conditions.keys())))
	print(command)
	subprocess.call(command)
#eatureCounts -p -a /home/patrick/Reference_Genomes/mm10/Ensembl/76/Mus_musculus.GRCm38.76_ucsc.gtf -o tmp.count
def run_gfold_count(args):
	# Pool.map helper: Pool.map passes a single argument, so unpack the
	# (bam, gtf, outdir) tuple built in main() for gfold.run_gfold_c.
	return gfold.run_gfold_c(*args)
def anno_function(args):
	# Pool.map helper: unpack the (bam, gtf, stranded, outdir) tuple built
	# in the htseq branch of main() for annotate_sam.
	return annotate_sam(*args)
def ConfigSectionMap(section, Config):
	"""Return every option of *section* in *Config* as an option->value dict.

	Config is a ConfigParser-style object.  An option whose lookup raises
	is reported and stored as None instead of aborting the whole section.
	"""
	dict1 = {}
	options = Config.options(section)
	for option in options:
		try:
			dict1[option] = Config.get(section, option)
		except Exception:
			# Narrowed from a bare 'except:'.  (The original also had a
			# dead 'if value == -1' branch calling an undefined
			# DebugPrint(); Config.get returns strings, so it never fired.)
			print("exception on %s!" % option)
			dict1[option] = None
	return dict1
def convert_gtf_to_ucsc(igtf):
	"""Rewrite Ensembl-style chromosome names in a GTF to UCSC style.

	Numeric chromosomes become chrN, X/Y become chrX/chrY and MT becomes
	chrM; comment lines and any other contigs (scaffolds, patches) are
	dropped.  Returns the path of a temporary file holding the converted
	GTF (caller is responsible for removing it).
	"""
	# mode="w": the file is written as text.  The original relied on the
	# binary default, which only worked under Python 2.
	out = tempfile.NamedTemporaryFile(mode="w", delete=False)
	with open(igtf) as f:
		for line in f:
			line = line.rstrip()
			if line.startswith("#"):
				# Header/comment line: skip.  (The original used 'pass'
				# and only excluded these by failing every match below.)
				continue
			word = line.split("\t")
			new_chr = None
			if re.match(r"^\d", word[0]):
				new_chr = "chr" + word[0]
			elif re.match(r"^X", word[0]):
				new_chr = "chrX"
			elif re.match(r"^Y", word[0]):
				new_chr = "chrY"
			elif word[0] == "MT":
				new_chr = "chrM"
			if new_chr:
				out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(new_chr, word[1], word[2], word[3], word[4], word[5], word[6], word[7], word[8]))
	out.close()
	return out.name
def infer(bam, genome):
	"""Run RSeQC's infer_experiment.py on *bam* against a reference BED.

	genome must be "hg19" or "mm10"; any other value leaves refbed
	unbound and the format() call below raises NameError.  The tool's
	output is redirected to infer_res.txt in the current directory.
	"""
	path1 = "/home/patrick/Reference_Genomes/pyngspipe_references/"
	if genome == "hg19":
		refbed = path1 + "hg19/hg19_Ensembl.bed"
	elif genome == "mm10":
		refbed = path1 + "mm10/mm10_Ensembl.bed"
	infercommand = "infer_experiment.py -i {} -r {} > infer_res.txt".format(bam, refbed)
	# shell=True is required for the '>' redirection.
	subprocess.call(infercommand, shell=True)
def main():
	"""Command-line entry point.

	Dispatches to one of four counting back-ends based on the chosen
	sub-command: htseq (htseq-count, optionally parallel over a config of
	bam files), gfold (GFOLD count), feat (featureCounts) or infer
	(RSeQC infer_experiment.py).
	"""
	parser = argparse.ArgumentParser(description='Counts features from BAM files\n')
	subparsers = parser.add_subparsers(help='Programs included',dest="subparser_name")
	htseq_parser = subparsers.add_parser('htseq', help="Runs HTseq count")
	htseq_parser.add_argument('-c','--config', help='Config file containing [Conditions] with bam files as keys and colnames as values', required=False)
	htseq_parser.add_argument('-i','--input', help='Input bam file', required=False)
	htseq_parser.add_argument('-g','--gtf', help='GTF file', required=False)
	htseq_parser.add_argument('-s','--stranded', help='Option for HTSeq, default=yes', default="yes", required=False)
	htseq_parser.add_argument('-t','--threads', help='Number of threads, default=8', default=8, required=False)
	htseq_parser.add_argument('-o','--output', help='Output counts file directory, default is current directory', required=False) #Will output all counts files and combined file if specified
	htseq_parser.add_argument('-e', action='store_true', help='Converts the GTF to UCSC format', required=False)
	gfold_parser = subparsers.add_parser('gfold', help="Runs GFOLD Count")
	gfold_parser.add_argument('-c','--config', help='Config file containing [Conditions], please see documentation for usage!\nPlease use a unique name for every input bam file!', required=False)
	gfold_parser.add_argument('-i','--input', help='Input bam file', required=False)
	gfold_parser.add_argument('-n',action='store_true', help='Gapdh Normlisation', required=False)
	gfold_parser.add_argument('-g','--gtf', help='GTF file', required=False)
	gfold_parser.add_argument('-t','--threads', help='Number of threads, default=8', default=8, required=False)
	gfold_parser.add_argument('-o','--output', help='Output counts file directory, default is current directory', required=False)
	feat_parser = subparsers.add_parser('feat', help="Runs featureCount")
	feat_parser.add_argument('-c','--config', help='Config file containing [Conditions] with bam files as keys and colnames as values', required=False)
	feat_parser.add_argument('-i','--input', help='Input bam file', required=False)
	feat_parser.add_argument('-g','--gtf', help='GTF file', required=False)
	feat_parser.add_argument('-s','--stranded', help='Option for featueCount, default=yes', default="yes", required=False)
	feat_parser.add_argument('-t','--threads', help='Number of threads, default=8', default=8, required=False)
	feat_parser.add_argument('-o','--output', help='Output counts file', required=False) #Will output all counts files and combined file if specified
	feat_parser.add_argument('-p', action='store_true', help='Samples are paired end', required=False)
	infer_parser = subparsers.add_parser('infer', help="Runs infer_experiment.py")
	infer_parser.add_argument('-i','--input', help='Input bam file', required=True)
	infer_parser.add_argument('-g','--genome', help='Options are hg19/mm10', required=True)
	if len(sys.argv)==1:
		parser.print_help()
		sys.exit(1)
	args = vars(parser.parse_args())
	# Not every subparser defines --output (e.g. 'infer'); use .get() so
	# those sub-commands do not die with KeyError (bug in the original).
	if args.get("output"):
		output = args["output"]
	else:
		output = os.getcwd()
	if args["subparser_name"] == "gfold":
		if args["config"]:
			Config = ConfigParser.ConfigParser()
			Config.optionxform = str
			Config.read(args["config"])
			#Read design matrix and create list of conditions and directories
			conditions = ConfigSectionMap("Conditions", Config)
			pool = Pool(int(args["threads"]))
			pool.map(run_gfold_count, itertools.izip(list(conditions.keys()), itertools.repeat(args["gtf"]), itertools.repeat(output))) ##Running annotation in parallel
			pool.close()
			pool.join()
		elif args["input"]:
			gfold.run_gfold_c(args["input"], args["gtf"], output)
	elif args["subparser_name"] == "feat":
		if args["config"]:
			Config = ConfigParser.ConfigParser()
			Config.optionxform = str
			Config.read(args["config"])
			conditions = ConfigSectionMap("Conditions", Config)
			featurecounts(conditions, int(args["threads"]), args["gtf"], args["stranded"], args["p"], args["output"])
		elif args["input"]:
			featurecounts(None, int(args["threads"]), args["gtf"], args["stranded"], args["p"], args["output"], bam=args["input"])
	elif args["subparser_name"] == "htseq":
		if args["e"]:
			gtf = convert_gtf_to_ucsc(args["gtf"])
		else:
			gtf = args["gtf"]
		if args["config"]:
			Config = ConfigParser.ConfigParser()
			Config.optionxform = str
			Config.read(args["config"])
			#Read design matrix and create list of conditions and directories
			conditions = ConfigSectionMap("Conditions", Config)
			pool = Pool(int(args["threads"]))
			pool.map(anno_function, itertools.izip(list(conditions.keys()), itertools.repeat(gtf), itertools.repeat(args["stranded"]), itertools.repeat(output))) ##Running annotation in parallel
			pool.close()
			pool.join()
			join_counts(conditions, output)
		elif args["input"]:
			annotate_sam(args["input"], gtf, args["stranded"], output)
	elif args["subparser_name"] == "infer":
		# Fixed: argparse stores -i/--input under dest 'input'; the
		# original read args["bam"], which always raised KeyError.
		infer(args["input"], args["genome"])
|
pdl30/pyrnatools
|
pyrnatools/counting.py
|
Python
|
gpl-2.0
| 9,128
|
[
"HTSeq"
] |
d2b16321dc7717a78c033b6cd6212e2582601bb9dcd77b8bc1779100fc935e32
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes representing non-periodic and periodic sites.
"""
import collections
import json
from typing import Optional, Tuple, Union
import numpy as np
from monty.dev import deprecated
from monty.json import MontyDecoder, MontyEncoder, MSONable
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.util.coord import pbc_diff
from pymatgen.util.typing import ArrayLike, SpeciesLike, CompositionLike
class Site(collections.abc.Hashable, MSONable):
    """
    A generalized *non-periodic* site. This is essentially a composition
    at a point in space, with some optional properties associated with it. A
    Composition is used to represent the atoms and occupancy, which allows for
    disordered site representation. Coords are given in standard cartesian
    coordinates.
    """

    # Absolute tolerance used by __eq__ when comparing coordinates.
    position_atol = 1e-5

    def __init__(
        self,
        species: Union[SpeciesLike, CompositionLike],
        coords: ArrayLike,
        properties: dict = None,
        skip_checks: bool = False,
    ):
        """
        Creates a non-periodic Site.

        :param species: Species on the site. Can be:
            i.  A Composition-type object (preferred)
            ii. An element / species specified either as a string
                symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
                e.g., 3, 56, or actual Element or Species objects.
            iii.Dict of elements/species and occupancies, e.g.,
                {"Fe" : 0.5, "Mn":0.5}. This allows the setup of
                disordered structures.
        :param coords: Cartesian coordinates of site.
        :param properties: Properties associated with the site as a dict, e.g.
            {"magmom": 5}. Defaults to None.
        :param skip_checks: Whether to ignore all the usual checks and just
            create the site. Use this if the Site is created in a controlled
            manner and speed is desired.
        """
        if not skip_checks:
            if not isinstance(species, Composition):
                try:
                    species = Composition({get_el_sp(species): 1})
                except TypeError:
                    species = Composition(species)
            totaloccu = species.num_atoms
            if totaloccu > 1 + Composition.amount_tolerance:
                raise ValueError("Species occupancies sum to more than 1!")
            coords = np.array(coords)
        self._species: Composition = species  # type: ignore
        self.coords: np.ndarray = coords  # type: ignore
        self.properties: dict = properties or {}

    def __getattr__(self, a):
        # overriding getattr doesn't play nice with pickle, so we
        # can't use self._properties
        p = object.__getattribute__(self, "properties")
        if a in p:
            return p[a]
        raise AttributeError(a)

    @property
    def species(self) -> Composition:
        """
        :return: The species on the site as a composition, e.g., Fe0.5Mn0.5.
        """
        return self._species  # type: ignore

    @species.setter
    def species(self, species: Union[SpeciesLike, CompositionLike]):
        # Same normalization/validation as in __init__.
        if not isinstance(species, Composition):
            try:
                species = Composition({get_el_sp(species): 1})
            except TypeError:
                species = Composition(species)
        totaloccu = species.num_atoms
        if totaloccu > 1 + Composition.amount_tolerance:
            raise ValueError("Species occupancies sum to more than 1!")
        self._species = species

    @property
    def x(self) -> float:
        """
        Cartesian x coordinate
        """
        return self.coords[0]  # type: ignore

    @x.setter
    def x(self, x: float):
        self.coords[0] = x  # type: ignore

    @property
    def y(self) -> float:
        """
        Cartesian y coordinate
        """
        return self.coords[1]  # type: ignore

    @y.setter
    def y(self, y: float):
        self.coords[1] = y  # type: ignore

    @property
    def z(self) -> float:
        """
        Cartesian z coordinate
        """
        return self.coords[2]  # type: ignore

    @z.setter
    def z(self, z: float):
        self.coords[2] = z  # type: ignore

    def distance(self, other) -> float:
        """
        Get distance between two sites.

        Args:
            other: Other site.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(other.coords - self.coords)

    def distance_from_point(self, pt) -> float:
        """
        Returns distance between the site and a point in space.

        Args:
            pt: Cartesian coordinates of point.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(np.array(pt) - self.coords)

    @property
    def species_string(self) -> str:
        """
        String representation of species on the site.
        """
        if self.is_ordered:
            return list(self.species.keys())[0].__str__()
        sorted_species = sorted(self.species.keys())
        return ", ".join([f"{sp}:{self.species[sp]:.3f}" for sp in sorted_species])

    @property  # type: ignore
    @deprecated(message="Use site.species instead. This will be deprecated with effect from pymatgen 2020.")
    def species_and_occu(self):
        """
        The species at the site, i.e., a Composition mapping type of
        element/species to occupancy.
        """
        return self.species

    @property
    def specie(self) -> Union[Element, Species, DummySpecies]:
        """
        The Species/Element at the site. Only works for ordered sites. Otherwise
        an AttributeError is raised. Use this property sparingly.  Robust
        design should make use of the property species instead. Note that the
        singular of species is also species. So the choice of this variable
        name is governed by programmatic concerns as opposed to grammar.

        Raises:
            AttributeError if Site is not ordered.
        """
        if not self.is_ordered:
            raise AttributeError("specie property only works for ordered sites!")
        return list(self.species.keys())[0]

    @property
    def is_ordered(self) -> bool:
        """
        True if site is an ordered site, i.e., with a single species with
        occupancy 1.
        """
        totaloccu = self.species.num_atoms
        return totaloccu == 1 and len(self.species) == 1

    def __getitem__(self, el):
        """
        Get the occupancy for element
        """
        return self.species[el]

    def __eq__(self, other):
        """
        Site is equal to another site if the species and occupancies are the
        same, and the coordinates are the same to some tolerance. numpy
        function `allclose` is used to determine if coordinates are close.
        """
        if other is None:
            return False
        return (
            self.species == other.species
            and np.allclose(self.coords, other.coords, atol=Site.position_atol)
            and self.properties == other.properties
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        return sum(el.Z for el in self.species.keys())

    def __contains__(self, el):
        return el in self.species

    def __repr__(self):
        return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format(self.species_string, *self.coords)

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas. For example, FeO4PLi is
        automatically sorted in LiFePO4.
        """
        if self.species.average_electroneg < other.species.average_electroneg:
            return True
        if self.species.average_electroneg > other.species.average_electroneg:
            return False
        if self.species_string < other.species_string:
            return True
        if self.species_string > other.species_string:
            return False
        return False

    def __str__(self):
        return f"{self.coords} {self.species_string}"

    def as_dict(self) -> dict:
        """
        Json-serializable dict representation for Site.
        """
        species_list = []
        for spec, occu in self.species.items():
            d = spec.as_dict()
            # @module/@class of the species are redundant inside a Site dict.
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        d = {
            "name": self.species_string,
            "species": species_list,
            "xyz": [float(c) for c in self.coords],  # type: ignore
            "properties": self.properties,
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }
        # NOTE: "properties" is already set unconditionally above; the
        # original re-assigned it when non-empty, which was redundant and
        # has been removed.
        return d

    @classmethod
    def from_dict(cls, d: dict) -> "Site":
        """
        Create Site from dict representation
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Pick the richest type the serialized entry supports:
            # Species for real elements with an oxidation state,
            # DummySpecies otherwise, plain Element when no state is given.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(sp_occu["element"]):
                sp = Species.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecies.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])  # type: ignore
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        if props is not None:
            # Round-trip through Monty JSON so serialized MSONable property
            # values are revived into their original objects.
            for key in props.keys():
                props[key] = json.loads(json.dumps(props[key], cls=MontyEncoder), cls=MontyDecoder)
        return cls(atoms_n_occu, d["xyz"], properties=props)
class PeriodicSite(Site, MSONable):
"""
Extension of generic Site object to periodic systems.
PeriodicSite includes a lattice system.
"""
def __init__(
self,
species: Union[SpeciesLike, CompositionLike],
coords: ArrayLike,
lattice: Lattice,
to_unit_cell: bool = False,
coords_are_cartesian: bool = False,
properties: dict = None,
skip_checks: bool = False,
):
"""
Create a periodic site.
:param species: Species on the site. Can be:
i. A Composition-type object (preferred)
ii. An element / species specified either as a string
symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
e.g., 3, 56, or actual Element or Species objects.
iii.Dict of elements/species and occupancies, e.g.,
{"Fe" : 0.5, "Mn":0.5}. This allows the setup of
disordered structures.
:param coords: Cartesian coordinates of site.
:param lattice: Lattice associated with the site.
:param to_unit_cell: Translates fractional coordinate to the
basic unit cell, i.e. all fractional coordinates satisfy 0
<= a < 1. Defaults to False.
:param coords_are_cartesian: Set to True if you are providing
cartesian coordinates. Defaults to False.
:param properties: Properties associated with the site as a dict, e.g.
{"magmom": 5}. Defaults to None.
:param skip_checks: Whether to ignore all the usual checks and just
create the site. Use this if the PeriodicSite is created in a
controlled manner and speed is desired.
"""
if coords_are_cartesian:
frac_coords = lattice.get_fractional_coords(coords)
else:
frac_coords = coords # type: ignore
if to_unit_cell:
frac_coords = np.mod(frac_coords, 1)
if not skip_checks:
frac_coords = np.array(frac_coords)
if not isinstance(species, Composition):
try:
species = Composition({get_el_sp(species): 1})
except TypeError:
species = Composition(species)
totaloccu = species.num_atoms
if totaloccu > 1 + Composition.amount_tolerance:
raise ValueError("Species occupancies sum to more than 1!")
self._lattice: Lattice = lattice
self._frac_coords: ArrayLike = frac_coords
self._species: Composition = species # type: ignore
self._coords: Optional[np.ndarray] = None
self.properties: dict = properties or {}
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between Sites
with different elements.
"""
return sum(el.Z for el in self.species.keys())
@property
def lattice(self) -> Lattice:
"""
Lattice associated with PeriodicSite
"""
return self._lattice
@lattice.setter
def lattice(self, lattice: Lattice):
"""
Sets Lattice associated with PeriodicSite
"""
self._lattice = lattice
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property # type: ignore
def coords(self) -> np.ndarray: # type: ignore
"""
Cartesian coordinates
"""
if self._coords is None:
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
return self._coords
@coords.setter
def coords(self, coords):
"""
Set Cartesian coordinates
"""
self._coords = np.array(coords)
self._frac_coords = self._lattice.get_fractional_coords(self._coords)
@property
def frac_coords(self) -> np.ndarray:
"""
Fractional coordinates
"""
return self._frac_coords # type: ignore
@frac_coords.setter
def frac_coords(self, frac_coords):
"""
Set fractional coordinates
"""
self._frac_coords = np.array(frac_coords)
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def a(self) -> float:
"""
Fractional a coordinate
"""
return self._frac_coords[0] # type: ignore
@a.setter
def a(self, a: float):
self._frac_coords[0] = a # type: ignore
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def b(self) -> float:
"""
Fractional b coordinate
"""
return self._frac_coords[1] # type: ignore
@b.setter
def b(self, b: float):
self._frac_coords[1] = b # type: ignore
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def c(self) -> float:
"""
Fractional c coordinate
"""
return self._frac_coords[2] # type: ignore
@c.setter
def c(self, c: float):
self._frac_coords[2] = c # type: ignore
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def x(self) -> float:
"""
Cartesian x coordinate
"""
return self.coords[0]
@x.setter
def x(self, x: float):
self.coords[0] = x
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
@property
def y(self) -> float:
"""
Cartesian y coordinate
"""
return self.coords[1]
@y.setter
def y(self, y: float):
self.coords[1] = y
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
@property
def z(self) -> float:
"""
Cartesian z coordinate
"""
return self.coords[2]
@z.setter
def z(self, z: float):
self.coords[2] = z
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
def to_unit_cell(self, in_place=False) -> Optional["PeriodicSite"]:
"""
Move frac coords to within the unit cell cell.
"""
frac_coords = np.mod(self.frac_coords, 1)
if in_place:
self.frac_coords = frac_coords
return None
return PeriodicSite(self.species, frac_coords, self.lattice, properties=self.properties)
def is_periodic_image(self, other: "PeriodicSite", tolerance: float = 1e-8, check_lattice: bool = True) -> bool:
"""
Returns True if sites are periodic images of each other.
Args:
other (PeriodicSite): Other site
tolerance (float): Tolerance to compare fractional coordinates
check_lattice (bool): Whether to check if the two sites have the
same lattice.
Returns:
bool: True if sites are periodic images of each other.
"""
if check_lattice and self.lattice != other.lattice:
return False
if self.species != other.species:
return False
frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
def __eq__(self, other):
return (
self.species == other.species
and self.lattice == other.lattice
and np.allclose(self.coords, other.coords, atol=Site.position_atol)
and self.properties == other.properties
)
def __ne__(self, other):
return not self.__eq__(other)
    def distance_and_image_from_frac_coords(
        self, fcoords: ArrayLike, jimage: Optional[ArrayLike] = None
    ) -> Tuple[float, np.ndarray]:
        """
        Gets distance between site and a fractional coordinate assuming
        periodic boundary conditions. If the index jimage of two sites atom j
        is not specified it selects the j image nearest to the i atom and
        returns the distance and jimage indices in terms of lattice vector
        translations. If the index jimage of atom j is specified it returns the
        distance between the i atom and the specified jimage atom, the given
        jimage is also returned.

        Args:
            fcoords (3x1 array): fcoords to get distance from.
            jimage (3x1 array): Specific periodic image in terms of
                lattice translations, e.g., [1,0,0] implies to take periodic
                image that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
                of the other site for which the distance applies.
        """
        # The minimum-image search itself is implemented on the lattice.
        return self.lattice.get_distance_and_image(self.frac_coords, fcoords, jimage=jimage)
    def distance_and_image(self, other: "PeriodicSite", jimage: Optional[ArrayLike] = None) -> Tuple[float, np.ndarray]:
        """
        Gets distance and image between two sites assuming periodic boundary
        conditions. If the index jimage of two sites atom j is not specified it
        selects the j image nearest to the i atom and returns the distance and
        jimage indices in terms of lattice vector translations. If the index
        jimage of atom j is specified it returns the distance between the ith
        atom and the specified jimage atom, the given jimage is also returned.

        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
                of the other site for which the distance applies.
        """
        # Delegates to the fractional-coordinate variant using other's coords.
        return self.distance_and_image_from_frac_coords(other.frac_coords, jimage)
def distance(self, other: "PeriodicSite", jimage: Optional[ArrayLike] = None):
"""
Get distance between two sites assuming periodic boundary conditions.
Args:
other (PeriodicSite): Other site to get distance from.
jimage (3x1 array): Specific periodic image in terms of lattice
translations, e.g., [1,0,0] implies to take periodic image
that is one a-lattice vector away. If jimage is None,
the image that is nearest to the site is found.
Returns:
distance (float): Distance between the two sites
"""
return self.distance_and_image(other, jimage)[0]
def __repr__(self):
return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, {:.4f}]".format(
self.species_string, self.coords[0], self.coords[1], self.coords[2], *self._frac_coords
)
def as_dict(self, verbosity: int = 0) -> dict:
"""
Json-serializable dict representation of PeriodicSite.
Args:
verbosity (int): Verbosity level. Default of 0 only includes the
matrix representation. Set to 1 for more details such as
cartesian coordinates, etc.
"""
species_list = []
for spec, occu in self._species.items():
d = spec.as_dict()
del d["@module"]
del d["@class"]
d["occu"] = occu
species_list.append(d)
d = {
"species": species_list,
"abc": [float(c) for c in self._frac_coords], # type: ignore
"lattice": self._lattice.as_dict(verbosity=verbosity),
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
if verbosity > 0:
d["xyz"] = [float(c) for c in self.coords]
d["label"] = self.species_string
d["properties"] = self.properties
return d
    @classmethod
    def from_dict(cls, d, lattice=None) -> "PeriodicSite":
        """
        Create PeriodicSite from dict representation.

        Args:
            d (dict): dict representation of PeriodicSite
            lattice: Optional lattice to override lattice specified in d.
                Useful for ensuring all sites in a structure share the same
                lattice.

        Returns:
            PeriodicSite
        """
        species = {}
        for sp_occu in d["species"]:
            # An oxidation state on a valid element symbol → Species; an
            # oxidation state on anything else → DummySpecies; otherwise a
            # plain Element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(sp_occu["element"]):
                sp = Species.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecies.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])  # type: ignore
            species[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        if props is not None:
            # Round-trip each property through monty's JSON encoder/decoder so
            # serialized MSONable values are rehydrated into objects.
            for key in props.keys():
                props[key] = json.loads(json.dumps(props[key], cls=MontyEncoder), cls=MontyDecoder)
        lattice = lattice if lattice else Lattice.from_dict(d["lattice"])
        return cls(species, d["abc"], lattice, properties=props)
|
vorwerkc/pymatgen
|
pymatgen/core/sites.py
|
Python
|
mit
| 23,312
|
[
"pymatgen"
] |
6ef6a519ab6ce28c0b866785b78c1e8a4a077c9dca98051040c98ccdb84c5090
|
# NOTE: This example uses the next generation Twilio helper library - for more
# information on how to download and install this version, visit
# https://www.twilio.com/docs/libraries/python
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
ACCOUNT_SID = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
AUTH_TOKEN = "your_auth_token"

client = Client(ACCOUNT_SID, AUTH_TOKEN)

# Fetch a single binding from the notify service and show its identifier.
service = client.notify.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
binding = service.bindings("BSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

print(binding.sid)
|
teoreteetik/api-snippets
|
notifications/rest/bindings/retrieve-binding/retrieve-binding.6.x.py
|
Python
|
mit
| 545
|
[
"VisIt"
] |
4f42abef7f100e0b78a0650d9816a2b94afc17fe3c00e4da809a0c993f558e48
|
from setuptools import setup

# Read the long description up front with a context manager so the file
# handle is closed promptly (the original leaked it via an inline open()).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='galaxy-api-tools',
    version='0.1.0',
    author='Carlos Borroto',
    author_email='carlos.borroto@gmail.com',
    url="https://github.com/cjav/galaxy-api-tools",
    packages=['galaxyapitools'],
    scripts=["scripts/galaxy-api-tools"],
    description='The Galaxy API tools serve as a client interface to Galaxy server.',
    long_description=long_description,
    install_requires=[
        "docopt",
        "bioblend",
    ],
)
|
cjav/galaxy-api-tools
|
setup.py
|
Python
|
bsd-2-clause
| 491
|
[
"Galaxy"
] |
e79984de5909c6e521a754d3b58024115df9aa6a2b86efdae551b4747d797be5
|
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
def download(path):
    """Use urllib to download a file.

    Parameters
    ----------
    path : str
        Url to download

    Returns
    -------
    path : str
        Location of downloaded file.
    """
    import os
    from six.moves import urllib
    # The final URL segment is used as the local filename; skip the download
    # entirely if that file already exists in the working directory.
    fname = path.split('/')[-1]
    if os.path.exists(fname):
        return fname
    print('Downloading ' + path)
    def progress(count, block_size, total_size):
        # Print every 20 blocks, rewriting the same console line with \r.
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')
    filepath, _ = urllib.request.urlretrieve(
        path, filename=fname, reporthook=progress)
    return filepath
def download_and_extract_tar(path, dst):
    """Download and extract a tar file.

    Parameters
    ----------
    path : str
        Url to tar file to download.
    dst : str
        Location to save tar file contents.
    """
    import tarfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original leaked the open tarfile).
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dst)
def download_and_extract_zip(path, dst):
    """Download and extract a zip file.

    Parameters
    ----------
    path : str
        Url to zip file to download.
    dst : str
        Location to save zip file contents.
    """
    import zipfile
    filepath = download(path)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original never closed the ZipFile).
    with zipfile.ZipFile(file=filepath) as zf:
        zf.extractall(dst)
def load_audio(filename, b_normalize=True):
    """Load the audiofile at the provided filename using scipy.io.wavfile.

    Optionally normalizes the audio to the maximum value.

    Parameters
    ----------
    filename : str
        File to load.
    b_normalize : bool, optional
        Normalize to the maximum value.

    Returns
    -------
    s : np.ndarray
        Audio samples; float32, peak-normalized and mean-subtracted when
        ``b_normalize`` is True. The sample rate is discarded.
    """
    sr, s = wavfile.read(filename)
    if b_normalize:
        s = s.astype(np.float32)
        # NOTE(review): assumes the file is not pure silence — a zero max
        # would divide by zero here; confirm inputs.
        s = (s / np.max(np.abs(s)))
        s -= np.mean(s)
    return s
def corrupt(x):
    """Take an input tensor and add uniform masking.

    Parameters
    ----------
    x : Tensor/Placeholder
        Input to corrupt.

    Returns
    -------
    x_corrupted : Tensor
        50 pct of values corrupted.
    """
    # Elementwise multiply by a random 0/1 mask; tf.mul is the pre-1.0
    # TensorFlow name used throughout this file.
    return tf.mul(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
                                               minval=0,
                                               maxval=2,
                                               dtype=tf.int32), tf.float32))
def interp(l, r, n_samples):
    """Interpolate between the arrays l and r, n_samples times.

    Parameters
    ----------
    l : np.ndarray
        Left edge
    r : np.ndarray
        Right edge
    n_samples : int
        Number of samples

    Returns
    -------
    arr : np.ndarray
        Interpolated array; first entry equals l, last equals r.
    """
    span = r - l
    denom = n_samples - 1
    return np.array([l + (step / denom) * span for step in range(n_samples)])
def make_latent_manifold(corners, n_samples):
    """Create a 2d manifold out of the provided corners: n_samples * n_samples.

    Parameters
    ----------
    corners : list of np.ndarray
        The four corners to interpolate.
    n_samples : int
        Number of samples to use in interpolation.

    Returns
    -------
    arr : np.ndarray
        Stacked array of all 2D interpolated samples
    """
    # Interpolate down the two vertical edges, then across each row.
    left_edge = interp(corners[0], corners[1], n_samples)
    right_edge = interp(corners[2], corners[3], n_samples)
    rows = [interp(left_edge[idx], right_edge[idx], n_samples)
            for idx in range(n_samples)]
    return np.vstack(rows)
def imcrop_tosquare(img):
    """Make any image a square image.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.

    Returns
    -------
    crop : np.ndarray
        Cropped image (center crop along each oversized axis).
    """
    side = np.min(img.shape[:2])
    excess = np.asarray(img.shape[:2]) - side
    out = img
    # Only axes with excess length need cropping; take a centered window.
    for axis in np.flatnonzero(excess):
        start = excess[axis] // 2
        out = np.take(out, start + np.r_[:side], axis=axis)
    return out
def slice_montage(montage, img_h, img_w, n_imgs):
    """Slice a montage image into n_img h x w images.

    Performs the opposite of the montage function. Takes a montage image and
    slices it back into a N x H x W x C image.

    Parameters
    ----------
    montage : np.ndarray
        Montage image to slice.
    img_h : int
        Height of sliced image
    img_w : int
        Width of sliced image
    n_imgs : int
        Number of images to slice

    Returns
    -------
    sliced : np.ndarray
        Sliced images as 4d array.
    """
    # The montage grid is square with 1-pixel borders between tiles.
    side = int(np.sqrt(n_imgs))
    tiles = []
    for row in range(side):
        top = 1 + row + row * img_h
        for col in range(side):
            left = 1 + col + col * img_w
            tiles.append(montage[top:top + img_h, left:left + img_w])
    return np.array(tiles)
def daily_minimal(path="/Users/albert/Pictures/InstaRaider/daily_minimal/"):
    """Collect the paths of all '.jpg' files in a directory.

    Parameters
    ----------
    path : str, optional
        Directory to scan. Should end with a path separator, since filenames
        are appended by plain string concatenation (preserving the original
        behavior). Defaults to the previously hard-coded, machine-specific
        folder for backward compatibility.

    Returns
    -------
    fnames : np.ndarray
        Array of paths to the '.jpg' files found.
    """
    import os
    fnames = [path + entry
              for entry in os.listdir(path)
              if entry.endswith(".jpg")]
    return np.array(fnames)
def montage(images, saveto='montage.png'):
    """Draw all images as a montage separated by 1 pixel borders.

    Also saves the file to the destination specified by `saveto`.

    Parameters
    ----------
    images : numpy.ndarray
        Input array to create montage of. Array should be:
        batch x height x width x channels.
    saveto : str
        Location to save the resulting montage image.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # Place the batch on a square-ish grid; unused cells stay mid-gray (0.5).
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    else:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    # Side effect: the montage is also written to disk via matplotlib.
    plt.imsave(arr=m, fname=saveto)
    return m
def montage_filters(W):
    """Draws all filters (n_input * n_output filters) as a
    montage image separated by 1 pixel borders.

    Parameters
    ----------
    W : Tensor
        Input tensor to create montage of.

    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    # Collapse input/output channel axes into one "filter" axis.
    W = np.reshape(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])
    side = int(np.ceil(np.sqrt(W.shape[-1])))
    out = np.ones(
        (W.shape[0] * side + side + 1,
         W.shape[1] * side + side + 1)) * 0.5
    for row in range(side):
        for col in range(side):
            idx = row * side + col
            if idx < W.shape[-1]:
                top = 1 + row + row * W.shape[0]
                left = 1 + col + col * W.shape[1]
                out[top:top + W.shape[0], left:left + W.shape[1]] = (
                    np.squeeze(W[:, :, :, idx]))
    return out
def get_celeb_files(dst='img_align_celeba', max_images=100):
    """Download the first `max_images` images of the celeb dataset.

    Files will be placed in a directory 'img_align_celeba' if one
    doesn't exist.

    Parameters
    ----------
    dst : str, optional
        Directory to place downloaded images in.
    max_images : int, optional
        Number of images to download/return.

    Returns
    -------
    files : list of strings
        Locations to the first `max_images` images of the celeb net dataset.
    """
    # Create a directory
    if not os.path.exists(dst):
        os.mkdir(dst)
    # Now perform the following 100 times:
    for img_i in range(1, max_images + 1):
        # create a string using the current loop counter
        f = '000%03d.jpg' % img_i
        if not os.path.exists(os.path.join(dst, f)):
            # and get the url with that string appended the end
            url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
            # We'll print this out to the console so we can see how far we've gone
            print(url, end='\r')
            # And now download the url to a location inside our new directory
            urllib.request.urlretrieve(url, os.path.join(dst, f))
    files = [os.path.join(dst, file_i)
             for file_i in os.listdir(dst)
             if '.jpg' in file_i][:max_images]
    return files
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Parameters
    ----------
    max_images : int, optional
        Number of images to download and load.

    Returns
    -------
    imgs : list of np.ndarray
        List of the first `max_images` images from the celeb dataset.
    """
    return [plt.imread(f_i) for f_i in get_celeb_files(max_images=max_images)]
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # Evaluate the Gaussian pdf over [-3, 3]; tf.neg is the pre-1.0
        # TensorFlow API used throughout this file.
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                           (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
def gauss2d(mean, stddev, ksize):
    """Use Tensorflow to compute a 2D Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed 2D Gaussian Kernel using Tensorflow.
    """
    z = gauss(mean, stddev, ksize)
    g = tf.Graph()
    with tf.Session(graph=g):
        # Outer product of the 1-D kernel with itself gives the 2-D kernel.
        z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
        return z_2d.eval()
def convolve(img, kernel):
    """Use Tensorflow to convolve a 4D image with a 4D kernel.

    Parameters
    ----------
    img : np.ndarray
        4-dimensional image shaped N x H x W x C
    kernel : np.ndarray
        4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
        kernel's height and width, the number of input channels, and the
        number of output channels. Note that C_I should = C.

    Returns
    -------
    result : np.ndarray
        Convolved result.
    """
    # A throwaway graph/session is used so this works outside any existing
    # TensorFlow graph; stride 1 and SAME padding preserve spatial size.
    g = tf.Graph()
    with tf.Session(graph=g):
        convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
        res = convolved.eval()
    return res
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.

    Parameters
    ----------
    ksize : int, optional
        Size of kernel.

    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # Gabor = 2-D Gaussian envelope multiplied by a sinusoidal carrier.
        z_2d = gauss2d(0.0, 1.0, ksize)
        ones = tf.ones((1, ksize))
        ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
        ys = tf.reshape(ys, [ksize, 1])
        wave = tf.matmul(ys, ones)
        # tf.mul is the pre-1.0 TensorFlow name for tf.multiply.
        gabor = tf.mul(wave, z_2d)
        return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
    """Helper utility to check homework assignment submissions and package them.

    Parameters
    ----------
    filename : str
        Output zip file name
    file_list : tuple
        Tuple of files to include
    optional_file_list : tuple, optional
        Additional filename suffixes that are packaged when present but whose
        absence is not reported.
    """
    # check each file exists
    for part_i, file_i in enumerate(file_list):
        if not os.path.exists(file_i):
            print('\nYou are missing the file {}. '.format(file_i) +
                  'It does not look like you have completed Part {}.'.format(
                      part_i + 1))

    def zipdir(path, zf):
        # Walk the tree and add only files matching the required/optional
        # suffix tuples (str.endswith accepts a tuple of options).
        for root, dirs, files in os.walk(path):
            for file in files:
                # make sure the files are part of the necessary file list
                if file.endswith(file_list) or file.endswith(optional_file_list):
                    zf.write(os.path.join(root, file))

    # Context manager guarantees the archive is flushed and closed even if
    # zipping raises (the original called close() manually).
    with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir('.', zipf)
    print('Your assignment zip file has been created!')
    print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
        os.path.abspath(filename)))
def normalize(a, s=0.1):
    '''Normalize the image range for visualization.

    Centers on the mean, scales by the standard deviation (floored at 1e-4
    to avoid division by zero), shifts to [0, 1] and maps to uint8.
    '''
    centered = (a - a.mean()) / max(a.std(), 1e-4)
    return np.uint8(np.clip(centered * s + 0.5, 0, 1) * 255)
# %%
def weight_variable(shape, **kwargs):
    '''Helper function to create a weight variable initialized with
    a normal distribution

    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    # tf.pack (pre-1.0 TensorFlow API) allows a Python list (possibly
    # containing tensors) as a dynamic shape; set_shape restores the static
    # shape information afterwards.
    if isinstance(shape, list):
        initial = tf.random_normal(tf.pack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
# %%
def bias_variable(shape, **kwargs):
    '''Helper function to create a bias variable.

    NOTE(review): the original description said "initialized with a constant
    value", but the implementation draws from a random normal exactly like
    weight_variable — confirm whether a constant initializer was intended.

    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    if isinstance(shape, list):
        initial = tf.random_normal(tf.pack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x):
    """Binary Cross Entropy measures cross entropy of a binary variable.

    loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Parameters
    ----------
    z : tf.Tensor
        A `Tensor` of the same type and shape as `x`.
    x : tf.Tensor
        A `Tensor` of type `float32` or `float64`.
    """
    # eps keeps the logs finite when z is exactly 0 or 1.
    eps = 1e-12
    return (-(x * tf.log(z + eps) +
              (1. - x) * tf.log(1. - z + eps)))
def conv2d(x, n_output,
           k_h=5, k_w=5, d_h=2, d_w=2,
           padding='SAME', name='conv2d', reuse=None):
    """Helper for creating a 2d convolution operation.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output : int
        Number of filters.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool, optional
        Whether to reuse variables within the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of convolution (after bias add) and the filter weights.
    """
    with tf.variable_scope(name or 'conv2d', reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, x.get_shape()[-1], n_output],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(
            name='conv',
            input=x,
            filter=W,
            strides=[1, d_h, d_w, 1],
            padding=padding)
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=conv,
            bias=b)
        return h, W
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
             k_h=5, k_w=5, d_h=2, d_w=2,
             padding='SAME', name='deconv2d', reuse=None):
    """Deconvolution helper.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output_h : int
        Height of output
    n_output_w : int
        Width of output
    n_output_ch : int
        Number of filters.
    n_input_ch : int, optional
        Number of input channels; inferred from ``x`` when None.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool, optional
        Whether to reuse variables within the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of deconvolution (after bias add) and the filter weights.
    """
    with tf.variable_scope(name or 'deconv2d', reuse=reuse):
        # BUG FIX: the filter shape previously used k_h for BOTH spatial
        # dimensions ([k_h, k_h, ...]), silently ignoring the k_w argument.
        # conv2d_transpose filters are [height, width, out_ch, in_ch].
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d_transpose(
            name='conv_t',
            value=x,
            filter=W,
            output_shape=tf.pack(
                [tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
            strides=[1, d_h, d_w, 1],
            padding=padding)
        conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
        b = tf.get_variable(
            name='b',
            shape=[n_output_ch],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(name='h', value=conv, bias=b)
        return h, W
def lrelu(features, leak=0.2):
    """Leaky rectifier.

    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.

    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    # max(x, leak*x) rewritten without a branch:
    # 0.5*(1+leak)*x + 0.5*(1-leak)*|x|.
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * features + neg_coef * abs(features)
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply
    activation : callable, optional
        Activation function applied to the output; identity when None.
    reuse : bool, optional
        Whether to reuse variables within the scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of fully connected layer and the weight matrix
    """
    # Flatten any higher-rank input down to (batch, features) first.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)
    n_input = x.get_shape().as_list()[1]
    with tf.variable_scope(name or "fc", reuse=reuse):
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, W),
            bias=b)
        if activation:
            h = activation(h)
        return h, W
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations.
        NOTE(review): this argument is currently ignored — the scope below is
        hard-coded to 'flatten'; confirm whether it should be used.
    reuse : bool, optional
        Present for signature symmetry with the other layer helpers; unused.

    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.

    Raises
    ------
    ValueError
        If the input is not 1, 2 or 4 dimensional (e.g. rank 3).
    """
    with tf.variable_scope('flatten'):
        dims = x.get_shape().as_list()
        if len(dims) == 4:
            flattened = tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        elif len(dims) == 2 or len(dims) == 1:
            flattened = x
        else:
            raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
                             len(dims))
        return flattened
def to_tensor(x):
    """Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.

    Performs the opposite of flatten(x). If the tensor is already 4-D, this
    returns the same as the input, leaving it unchanged.

    Parameters
    ----------
    x : tf.Tensor
        Input 2-D tensor. If 4-D already, left unchanged.

    Returns
    -------
    x : tf.Tensor
        4-D representation of the input.

    Raises
    ------
    ValueError
        If the tensor is not 2D or already 4D.
    """
    if len(x.get_shape()) == 2:
        n_input = x.get_shape().as_list()[1]
        x_dim = np.sqrt(n_input)
        # Perfect square → assume a square grayscale image; n/3 a perfect
        # square → assume a square RGB image; otherwise treat each feature
        # as a 1x1 spatial location with n_input channels.
        if x_dim == int(x_dim):
            x_dim = int(x_dim)
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 1], name='reshape')
        elif np.sqrt(n_input / 3) == int(np.sqrt(n_input / 3)):
            x_dim = int(np.sqrt(n_input / 3))
            x_tensor = tf.reshape(
                x, [-1, x_dim, x_dim, 3], name='reshape')
        else:
            x_tensor = tf.reshape(
                x, [-1, 1, 1, n_input], name='reshape')
    elif len(x.get_shape()) == 4:
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    return x_tensor
|
AlfredNeverKog/BrainCarya
|
src/my/kadenze/lesson4/utils.py
|
Python
|
mit
| 21,223
|
[
"Gaussian"
] |
c6de3641ccfad9462b5837679089e3852c92bab088c9010c4ec4472ee36392fc
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a container class to store parameters for the
geometry of an ellipse.
"""
import math
from astropy import log
import numpy as np
__all__ = ['EllipseGeometry']
# 16x16 binary masks used by EllipseGeometry.find_center. Each is passed as
# the ``mask`` argument of np.ma.masked_array, so entries equal to 1 are
# *hidden* and entries equal to 0 are the pixels actually measured.
# IN_MASK keeps a small disk at the window center (the "inner" region).
IN_MASK = [
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
# OUT_MASK keeps an annulus along the window border (the "outer" region).
OUT_MASK = [
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
    [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
    [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
    [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
    [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
    [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
]
def _area(sma, eps, phi, r):
"""
Compute elliptical sector area.
"""
aux = r * math.cos(phi) / sma
signal = aux / abs(aux)
if abs(aux) >= 1.:
aux = signal
return abs(sma**2 * (1.-eps) / 2. * math.acos(aux))
class EllipseGeometry:
r"""
Container class to store parameters for the geometry of an ellipse.
Parameters that describe the relationship of a given ellipse with
other associated ellipses are also encapsulated in this container.
These associated ellipses may include, e.g., the two (inner and
outer) bounding ellipses that are used to build sectors along the
elliptical path. These sectors are used as areas for integrating
pixel values, when the area integration mode (mean or median) is
used.
This class also keeps track of where in the ellipse we are when
performing an 'extract' operation. This is mostly relevant when
using an area integration mode (as opposed to a pixel integration
mode)
Parameters
----------
x0, y0 : float
The center pixel coordinate of the ellipse.
sma : float
The semimajor axis of the ellipse in pixels.
eps : ellipticity
The ellipticity of the ellipse.
pa : float
The position angle (in radians) of the semimajor axis in
relation to the positive x axis of the image array (rotating
towards the positive y axis). Position angles are defined in the
range :math:`0 < PA <= \pi`. Avoid using as starting position
angle of 0., since the fit algorithm may not work properly.
When the ellipses are such that position angles are near either
extreme of the range, noise can make the solution jump back and
forth between successive isophotes, by amounts close to 180
degrees.
astep : float, optional
The step value for growing/shrinking the semimajor axis. It can
be expressed either in pixels (when ``linear_growth=True``) or
as a relative value (when ``linear_growth=False``). The default
is 0.1.
linear_growth : bool, optional
The semimajor axis growing/shrinking mode. The default is
`False`.
fix_center : bool, optional
Keep center of ellipse fixed during fit? The default is False.
fix_pa : bool, optional
Keep position angle of semi-major axis of ellipse fixed during fit?
The default is False.
fix_eps : bool, optional
Keep ellipticity of ellipse fixed during fit? The default is False.
"""
    def __init__(self, x0, y0, sma, eps, pa, astep=0.1, linear_growth=False,
                 fix_center=False, fix_pa=False, fix_eps=False):
        """
        Initialize the geometry state; see the class docstring for the
        meaning of each parameter.
        """
        self.x0 = x0
        self.y0 = y0
        self.sma = sma
        self.eps = eps
        self.pa = pa
        self.astep = astep
        self.linear_growth = linear_growth
        # Fixed parameters are flagged in here. Note that the
        # ordering must follow the same ordering used in the
        # fitter._CORRECTORS list. fix_center covers both x0 and y0.
        self.fix = np.array([fix_center, fix_center, fix_pa, fix_eps])
        # limits for sector angular width
        self._phi_min = 0.05
        self._phi_max = 0.2
        # variables used in the calculation of the sector angular width
        sma1, sma2 = self.bounding_ellipses()
        inner_sma = min((sma2 - sma1), 3.)
        self._area_factor = (sma2 - sma1) * inner_sma
        # sma can eventually be zero!
        if self.sma > 0.:
            self.sector_angular_width = max(min((inner_sma / self.sma),
                                                self._phi_max), self._phi_min)
            self.initial_polar_angle = self.sector_angular_width / 2.
            self.initial_polar_radius = self.radius(self.initial_polar_angle)
    def find_center(self, image, threshold=0.1, verbose=True):
        """
        Find the center of a galaxy.

        If the algorithm is successful the (x, y) coordinates in this
        `~photutils.isophote.EllipseGeometry` (i.e., the ``x0`` and
        ``y0`` attributes) instance will be modified.

        The isophote fit algorithm requires an initial guess for the
        galaxy center (x, y) coordinates and these coordinates must be
        close to the actual galaxy center for the isophote fit to work.
        This method can provide an initial guess for the galaxy
        center coordinates. See the **Notes** section below for more
        details.

        Parameters
        ----------
        image : 2D `~numpy.ndarray`
            The image array. Masked arrays are not recognized here. This
            assumes that centering should always be done on valid pixels.
        threshold : float, optional
            The centerer threshold. To turn off the centerer, set this
            to a large value (i.e., >> 1). The default is 0.1.
        verbose : bool, optional
            Whether to print object centering information. The default is
            `True`.

        Notes
        -----
        The centerer function scans a 10x10 window centered on the (x,
        y) coordinates in the `~photutils.isophote.EllipseGeometry`
        instance passed to the constructor of the
        `~photutils.isophote.Ellipse` class. If any of the
        `~photutils.isophote.EllipseGeometry` (x, y) coordinates are
        `None`, the center of the input image frame is used. If the
        center acquisition is successful, the
        `~photutils.isophote.EllipseGeometry` instance is modified in
        place to reflect the solution of the object centerer algorithm.

        In some cases the object centerer algorithm may fail even though
        there is enough signal-to-noise to start a fit (e.g., objects
        with very high ellipticity). In those cases the sensitivity
        of the algorithm can be decreased by decreasing the value of
        the object centerer threshold parameter. The centerer works by
        looking where a quantity akin to a signal-to-noise ratio is
        maximized within the 10x10 window. The centerer can thus be shut
        off entirely by setting the threshold to a large value (i.e.,
        >> 1; meaning no location inside the search window will achieve
        that signal-to-noise ratio).
        """
        self._centerer_mask_half_size = len(IN_MASK) / 2
        self.centerer_threshold = threshold
        # number of pixels in each mask
        sz = len(IN_MASK)
        self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)),
                                                    mask=IN_MASK)
        self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)),
                                                     mask=OUT_MASK)
        self._centerer_in_mask_npix = np.sum(self._centerer_ones_in)
        self._centerer_out_mask_npix = np.sum(self._centerer_ones_out)
        # Check if center coordinates point to somewhere inside the frame.
        # If not, set then to frame center.
        shape = image.shape
        _x0 = self.x0
        _y0 = self.y0
        if (_x0 is None or _x0 < 0 or _x0 >= shape[1] or _y0 is None or
                _y0 < 0 or _y0 >= shape[0]):
            _x0 = shape[1] / 2
            _y0 = shape[0] / 2
        max_fom = 0.
        max_i = 0
        max_j = 0
        # scan all positions inside window
        window_half_size = 5
        for i in range(int(_x0 - window_half_size),
                       int(_x0 + window_half_size) + 1):
            for j in range(int(_y0 - window_half_size),
                           int(_y0 + window_half_size) + 1):
                # ensure that it stays inside image frame
                i1 = int(max(0, i - self._centerer_mask_half_size))
                j1 = int(max(0, j - self._centerer_mask_half_size))
                i2 = int(min(shape[1] - 1, i + self._centerer_mask_half_size))
                j2 = int(min(shape[0] - 1, j + self._centerer_mask_half_size))
                window = image[j1:j2, i1:i2]
                # averages in inner and outer regions.
                inner = np.ma.masked_array(window, mask=IN_MASK)
                outer = np.ma.masked_array(window, mask=OUT_MASK)
                inner_avg = np.sum(inner) / self._centerer_in_mask_npix
                outer_avg = np.sum(outer) / self._centerer_out_mask_npix
                # standard deviation and figure of merit
                inner_std = np.std(inner)
                outer_std = np.std(outer)
                stddev = np.sqrt(inner_std**2 + outer_std**2)
                # figure of merit: contrast between inner disk and outer
                # annulus, normalized by the combined scatter.
                fom = (inner_avg - outer_avg) / stddev
                if fom > max_fom:
                    max_fom = fom
                    max_i = i
                    max_j = j
        # figure of merit > threshold: update geometry with new coordinates.
        if max_fom > threshold:
            self.x0 = float(max_i)
            self.y0 = float(max_j)
            if verbose:
                log.info(f'Found center at x0 = {self.x0:5.1f}, '
                         f'y0 = {self.y0:5.1f}')
        else:
            if verbose:
                log.info('Result is below the threshold -- keeping the '
                         'original coordinates.')
def radius(self, angle):
    """
    Compute the polar radius of the ellipse at a given polar angle.

    Parameters
    ----------
    angle : float
        The polar angle (radians).

    Returns
    -------
    radius : float
        The polar radius (pixels).
    """
    squashed = 1. - self.eps
    denominator = np.sqrt((squashed * np.cos(angle))**2 + np.sin(angle)**2)
    return self.sma * squashed / denominator
def initialize_sector_geometry(self, phi):
    """
    Initialize geometry attributes associated with an elliptical
    sector at the given polar angle ``phi``.

    This function computes:

    * the four vertices that define the elliptical sector on the
      pixel array.
    * the sector area (saved in the ``sector_area`` attribute)
    * the sector angular width (saved in ``sector_angular_width``
      attribute)

    Parameters
    ----------
    phi : float
        The polar angle (radians) where the sector is located.

    Returns
    -------
    x, y : 1D `~numpy.ndarray`
        The x and y coordinates of each vertex as 1D arrays.
    """
    # These polar radii bound the region between the inner
    # and outer ellipses that define the sector.
    sma1, sma2 = self.bounding_ellipses()
    eps_ = 1. - self.eps
    # NOTE(review): `sector_angular_width` is read here to place the sector
    # edges, and re-assigned further below for the *next* call -- the
    # statement order in this method is significant.
    # polar vector at one side of the elliptical sector
    self._phi1 = phi - self.sector_angular_width / 2.
    # r1/r2: radii of the inner/outer bounding ellipses at angle _phi1.
    r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2
                                  + (math.sin(self._phi1))**2))
    r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2
                                  + (math.sin(self._phi1))**2))
    # polar vector at the other side of the elliptical sector
    self._phi2 = phi + self.sector_angular_width / 2.
    # r3/r4: radii of the outer/inner bounding ellipses at angle _phi2.
    r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2
                                  + (math.sin(self._phi2))**2))
    r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2
                                  + (math.sin(self._phi2))**2))
    # sector area, from the difference of the four elliptical-segment
    # areas returned by the module-level `_area` helper.
    sa1 = _area(sma1, self.eps, self._phi1, r1)
    sa2 = _area(sma2, self.eps, self._phi1, r2)
    sa3 = _area(sma2, self.eps, self._phi2, r3)
    sa4 = _area(sma1, self.eps, self._phi2, r4)
    self.sector_area = abs((sa3 - sa2) - (sa4 - sa1))
    # angular width of sector. It is calculated such that the sectors
    # come out with roughly constant area along the ellipse.
    self.sector_angular_width = max(min((self._area_factor / (r3 - r4) /
                                         r4), self._phi_max),
                                    self._phi_min)
    # compute the 4 vertices that define the elliptical sector.
    vertex_x = np.zeros(shape=4, dtype=float)
    vertex_y = np.zeros(shape=4, dtype=float)
    # vertices are labelled in counterclockwise sequence; the position
    # angle `pa` rotates the sector into image coordinates.
    vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa)
    vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa)
    vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa)
    vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa)
    # translate from ellipse-centered to image pixel coordinates.
    vertex_x += self.x0
    vertex_y += self.y0
    return vertex_x, vertex_y
def bounding_ellipses(self):
    """
    Compute the semimajor axes of the two ellipses bounding the
    annulus where integrations take place.

    Returns
    -------
    sma1, sma2 : float
        The smaller and larger values of semimajor axis length that
        define the annulus bounding ellipses.
    """
    half_step = self.astep / 2.
    if self.linear_growth:
        # Additive step: symmetric offsets around the current sma.
        return self.sma - half_step, self.sma + half_step
    # Geometric step: symmetric fractional offsets.
    return self.sma * (1. - half_step), self.sma * (1. + half_step)
def polar_angle_sector_limits(self):
    """
    Return the pair of polar angles bounding the current sector.

    These values are only meaningful after
    :meth:`~photutils.isophote.EllipseGeometry.initialize_sector_geometry`
    has been called.

    Returns
    -------
    phi1, phi2 : float
        The smaller and larger values of polar angle that bound the
        current sector.
    """
    return (self._phi1, self._phi2)
def to_polar(self, x, y):
    r"""
    Convert (x, y) pixel image coordinates to radius and polar angle
    in the ellipse coordinate system.

    This function takes care of the different definitions for
    position angle (PA) and polar angle (phi):

    .. math::
        -\pi < PA < \pi

        0 < phi < 2 \pi

    Note that radius can be anything. The solution is not tied to
    the semimajor axis length, but to the center position and tilt
    angle.

    Parameters
    ----------
    x, y : float
        The (x, y) image coordinates.

    Returns
    -------
    radius, angle : float
        The ellipse radius and polar angle.
    """
    # Dispatch to a scalar or vectorized implementation: the scalar path
    # avoids the speed penalty of array machinery and dominates profiles.
    if isinstance(x, (int, float)):
        return self._to_polar_scalar(x, y)
    return self._to_polar_vectorized(x, y)
def _to_polar_scalar(self, x, y):
x1 = x - self.x0
y1 = y - self.y0
radius = x1**2 + y1**2
if radius > 0.0:
radius = math.sqrt(radius)
angle = math.asin(abs(y1) / radius)
else:
radius = 0.
angle = 1.
if x1 >= 0. and y1 < 0.:
angle = 2*np.pi - angle
elif x1 < 0. and y1 >= 0.:
angle = np.pi - angle
elif x1 < 0. and y1 < 0.:
angle = np.pi + angle
pa1 = self.pa
if self.pa < 0.:
pa1 = self.pa + 2*np.pi
angle = angle - pa1
if angle < 0.:
angle = angle + 2*np.pi
return radius, angle
def _to_polar_vectorized(self, x, y):
x1 = np.atleast_2d(x) - self.x0
y1 = np.atleast_2d(y) - self.y0
radius = x1**2 + y1**2
angle = np.ones(radius.shape)
imask = (radius > 0.0)
radius[imask] = np.sqrt(radius[imask])
angle[imask] = np.arcsin(np.abs(y1[imask]) / radius[imask])
radius[~imask] = 0.
angle[~imask] = 1.
idx = (x1 >= 0.) & (y1 < 0)
angle[idx] = 2*np.pi - angle[idx]
idx = (x1 < 0.) & (y1 >= 0.)
angle[idx] = np.pi - angle[idx]
idx = (x1 < 0.) & (y1 < 0.)
angle[idx] = np.pi + angle[idx]
pa1 = self.pa
if self.pa < 0.:
pa1 = self.pa + 2*np.pi
angle = angle - pa1
angle[angle < 0] += 2*np.pi
return radius, angle
def update_sma(self, step):
    """
    Return an updated semimajor axis length for the given step.

    The caller manages the sign of ``step`` to support both growing
    outwards and shrinking inwards.

    Parameters
    ----------
    step : float
        The step value.

    Returns
    -------
    sma : float
        The new semimajor axis length.
    """
    # Additive step in linear-growth mode, fractional step otherwise.
    if self.linear_growth:
        return self.sma + step
    return self.sma * (1. + step)
def reset_sma(self, step):
    """
    Reverse the direction of semimajor axis growth (outwards to inwards).

    Parameters
    ----------
    step : float
        The current step value.

    Returns
    -------
    sma, new_step : float
        The new semimajor axis length and the new step value to
        initiate the shrinking of the semimajor axis length. This is
        the step value that should be used when calling the
        :meth:`~photutils.isophote.EllipseGeometry.update_sma`
        method.
    """
    if self.linear_growth:
        # Undo one additive step and flip its sign.
        return self.sma - step, -step
    # Undo one fractional step; the inverse factor gives the new step.
    shrink_factor = 1. / (1. + step)
    return self.sma * shrink_factor, shrink_factor - 1.
|
astropy/photutils
|
photutils/isophote/geometry.py
|
Python
|
bsd-3-clause
| 19,964
|
[
"Galaxy"
] |
8fccf24664c965b951502d17a35e41f79df34f2f725aa38a961886a0bfaacc53
|
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import collections
import traceback
# Modules whose import indicates leftover interactive-debugger code.
DEBUG_STATEMENTS = set(['pdb', 'ipdb', 'pudb', 'q'])
# Location record for one detected debug import: module name, line, column.
DebugStatement = collections.namedtuple(
    'DebugStatement', ['name', 'line', 'col'],
)
class ImportStatementParser(ast.NodeVisitor):
    """AST visitor that records imports of known debugger modules."""

    def __init__(self):
        # Accumulates DebugStatement records across the whole visit.
        self.debug_import_statements = []

    def visit_Import(self, node):
        # `import pdb` style: check each alias on the statement.
        for alias in node.names:
            if alias.name in DEBUG_STATEMENTS:
                record = DebugStatement(alias.name, node.lineno, node.col_offset)
                self.debug_import_statements.append(record)

    def visit_ImportFrom(self, node):
        # `from pdb import ...` style: only the source module matters.
        if node.module in DEBUG_STATEMENTS:
            record = DebugStatement(node.module, node.lineno, node.col_offset)
            self.debug_import_statements.append(record)
def check_file_for_debug_statements(filename):
    """Check one Python source file for debugger-module imports.

    Prints one diagnostic line per offending import and returns 1;
    returns 0 when the file is clean. A file that fails to parse is
    reported with its traceback and also counts as a failure (returns 1).
    """
    try:
        # Read through a context manager so the file handle is closed
        # deterministically (the original `open(filename).read()` leaked
        # the handle until garbage collection).
        with open(filename) as source_file:
            source = source_file.read()
        ast_obj = ast.parse(source, filename=filename)
    except SyntaxError:
        print('{0} - Could not parse ast'.format(filename))
        print()
        # Indent the traceback so it reads as a sub-block of the message.
        print('\t' + traceback.format_exc().replace('\n', '\n\t'))
        print()
        return 1
    visitor = ImportStatementParser()
    visitor.visit(ast_obj)
    if visitor.debug_import_statements:
        for debug_statement in visitor.debug_import_statements:
            print(
                '{0}:{1}:{2} - {3} imported'.format(
                    filename,
                    debug_statement.line,
                    debug_statement.col,
                    debug_statement.name,
                )
            )
        return 1
    else:
        return 0
def debug_statement_hook(argv=None):
    """Entry point: check every given filename; nonzero if any file fails."""
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to run')
    args = parser.parse_args(argv)
    # OR the per-file results together so one bad file fails the hook.
    status = 0
    for name in args.filenames:
        status |= check_file_for_debug_statements(name)
    return status
# Allow running as a standalone script; the exit status mirrors the hook result.
if __name__ == '__main__':
    exit(debug_statement_hook())
|
arahayrabedian/pre-commit-hooks
|
pre_commit_hooks/debug_statement_hook.py
|
Python
|
mit
| 2,072
|
[
"VisIt"
] |
ff9cab6ca5bbeda17941c7a1922732f25c36eef9f8e4b573514106edd8baa6a7
|
import math
class NeuronConnection(object):
    """A weighted, directed link to another neuron."""

    def __init__(self, neuron, weight=1.0):
        self.weight = weight
        self.neuron = neuron
class Neuron(object):
    """A sigmoid neuron.

    A neuron either carries an externally supplied ``input`` value
    (input-layer neuron) or sums the weighted outputs of its incoming
    connections (hidden/output neuron).
    """

    def __init__(self, input=None):
        # `input` holds the externally supplied value for input-layer
        # neurons; None marks a regular (non-input) neuron.
        self.input = input
        # Cached activation, filled in by activate().
        self.output = None
        self.incoming_neurons = []
        self.outgoing_neurons = []

    def connect_child(self, child_neuron, weight=1.0):
        '''Connect a child neuron to our output, then connect ourselves as input to child neuron'''
        self.outgoing_neurons.append(NeuronConnection(child_neuron, weight=weight))
        child_neuron.incoming_neurons.append(NeuronConnection(self, weight=weight))

    def activate(self):
        """Compute, cache, and return the sigmoid of the summed inputs."""
        self.output = 1 / (1 + math.exp(-self.sum_inputs))
        return self.output

    @property
    def sum_inputs(self):
        """Sum of this neuron's inputs.

        Returns the raw input value for input neurons, otherwise the
        weighted sum of the (lazily activated) upstream outputs.
        """
        sum = 0
        # Bug fix: compare against None explicitly -- the original used
        # `if self.input:`, which misclassified an input value of 0 (or
        # 0.0) as a non-input neuron.
        if self.input is not None:
            # If we're an input
            return self.input
        else:
            # Otherwise we're a normal neuron
            for connection in self.incoming_neurons:
                if connection.neuron.output is None:
                    # Lazily activate upstream neurons on first use.
                    connection.neuron.activate()
                sum += connection.neuron.output * connection.weight
            return sum
# TODO: NOT gate
# inputs 0 or 1 returns opposite, 1 and 0 respectively
# Demo: feed a 0-valued input neuron into a single child and activate it.
# NOTE(review): this uses a Python 2 print statement; the file targets py2.
if __name__ == "__main__":
    neuron = Neuron(input=0)
    neuron_2 = Neuron()
    neuron.connect_child(neuron_2, weight=1)
    neuron_2.activate()
    print "Output:", neuron_2.output
|
dev-coop/neural-net-hacking-examples
|
python/Part 1/neural_network_with_connections.py
|
Python
|
mit
| 1,475
|
[
"NEURON"
] |
feffa1fe5a854e2345646f77b3d405d31058928c96f2fbb4a4446f57dbfc84e3
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import functions
import Image
import math
class Anaglyph:
    "Anaglyph support class"
    # NOTE(review): this module targets Python 2 (print statements) and the
    # legacy standalone PIL `import Image` style.

    def __init__(self, parent):
        # Geometry offsets applied when composing the final image.
        self.vergence = 0 # Horizontal separation
        self.vsep = 0 # Vertical separation
        self.left = self.right = '' # Right and left Images
        self.height = self.width = 0 # Height and Width
        # Load the 'anaglyph' config section; getConfig returns 0 when no
        # saved configuration exists, so fall back to red/cyan glasses.
        self.conf = functions.getConfig(self, 'anaglyph')
        if self.conf == 0: # default configuration
            self.conf = {}
            self.conf['type'] = 'red/cyan'

    def __del__(self):
        # Persist the configuration when the object is destroyed.
        functions.saveConfig(self, 'anaglyph', self.conf)

    def open(self, path, anaglyph=False):
        """Load a stereo file and split it into left/right images."""
        try:
            self.left, self.right = functions.set_sources_from_stereo(self, path, anaglyph)
            self.oleft, self.oright = self.left, self.right # Back-up
            size = self.left.size
            self.height, self.width = size[1], size[0]
        except:
            # NOTE(review): bare except swallows the real error; only a
            # generic message is printed.
            print "Error while opening"

    def open2(self, path='None', image='None'):
        """Load the two eyes from separate paths or from Image objects."""
        if path != 'None':
            functions.set_sources_from_images(self, path[0], path[1])
        elif image[0] != '':
            self.left, self.right = image[0], image[1]
            self.oleft, self.oright = image[0], image[1] # Back-up
        taille = self.right.size
        self.height, self.width = taille[1], taille[0]

    def make(self, parent, fullscreen):
        """Compose the anaglyph and draw it centered on the parent widget."""
        # NOTE(review): math.fabs returns floats, so width/height are floats
        # here; newer PIL versions require ints for Image.new -- confirm the
        # targeted PIL version tolerates this.
        width = self.width + math.fabs(self.vergence)
        height = self.height + math.fabs(self.vsep)
        self.stereo = Image.new('RGB', (width,height)) # Final image
        self.make_colored()
        drawable = functions.image_to_drawable(self, self.stereo)
        # Center the result inside the parent's drawing area.
        x = (parent.max_width - width) / 2
        y = (parent.max_height - height) / 2
        parent.stereo.window.draw_drawable(parent.gc, drawable, 0, 0, x, y, -1, -1)

    def make_colored(self):
        """Full-color anaglyph: one channel from one eye, two from the other."""
        rg, vg, bg = self.left.split()
        rd, vd, bd = self.right.split()
        if self.conf['type'] == "red/cyan":
            source = [rg, vd, bd]
        elif self.conf['type'] == "green/magenta":
            source = [rd, vg, bd]
        elif self.conf['type'] == "blue/amber":
            source = [rd, vd, bg]
        self.stereo = Image.merge("RGB", source)

    def make_halfColored(self):
        """Half-color variant: left eye reduced to luma on one channel.

        NOTE(review): the final merge is commented out, so this method
        computes `left` but never assigns self.stereo -- it is incomplete.
        """
        rd, vd, bd = self.right.split()
        if self.conf['type'] == "red/cyan":
            filter = ( 0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
            left = self.left.convert("RGB", filter)
        elif self.conf['type'] == "green/magenta":
            filter = ( 0, 0, 0, 0, 0.299, 0.587, 0.114, 0, 0, 0, 0, 0 )
            left = self.left.convert("RGB", filter)
        elif self.conf['type'] == "blue/amber":
            filter = ( 0, 0, 0, 0, 0, 0, 0, 0, 0.299, 0.587, 0.114, 0 )
            left = self.left.convert("RGB", filter)
        #self.stereo = Image.merge("RGB", source)

    def make_optimized(self):
        """Optimized variant (0.7 G + 0.3 B weights).

        NOTE(review): like make_halfColored, the final composite is
        commented out, so this method has no visible effect yet.
        """
        if self.conf['type'] == "red/cyan":
            filter = ( 0, 0.7, 0.3, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
            left = self.left.convert("RGB", filter)
        elif self.conf['type'] == "green/magenta":
            filter = ( 0, 0, 0, 0, 0, 0.7, 0.3, 0, 0, 0, 0, 0 )
            left = self.left.convert("RGB", filter)
        elif self.conf['type'] == "blue/amber":
            filter = ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.7, 0.3, 0 )
            left = self.left.convert("RGB", filter)
        #self.stereo = Image.composite(left, right, left)

    def resize(self, maxw, maxh, force=0, normal=0):
        """Fit both eyes inside (maxw, maxh), preserving aspect ratio."""
        if normal == 1: # Scale 1:1
            self.right, self.left = self.oright, self.oleft # Backup
            taille = self.right.size
            self.height, self.width = taille[1], taille[0]
        elif self.height > 0 and self.width > 0:
            if self.height > maxh or self.width > maxw or force == 1:
                # Adding 0.0 forces float division under Python 2.
                qrh, qrw = (self.height + 0.00000000) / maxh, (self.width + 0.00000000) / maxw
                qrmax = max(qrh, qrw)
                height, width = int(math.ceil(self.height / qrmax)), int(math.ceil(self.width / qrmax))
                self.right, self.left = self.oright, self.oleft # Backup
                self.right, self.left = self.right.resize((width, height), Image.ANTIALIAS), self.left.resize((width, height), Image.ANTIALIAS)
                self.height, self.width = height, width

    def swap_eyes(self):
        # Exchange left and right images (e.g. to fix a swapped source).
        self.tempimg = self.left
        self.left = self.right
        self.right = self.tempimg
        self.tempimg = ''
|
magestik/TuxStereoViewer
|
src/lib_anaglyph.py
|
Python
|
gpl-3.0
| 4,017
|
[
"Amber"
] |
665a55bd9472e7cc195485d66223e4deb42ed92583fa5bf4ce68532d34e8e968
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Defines SymmetryGroup parent class and PointGroup and SpaceGroup classes.
Shyue Ping Ong thanks Marc De Graef for his generous sharing of his
SpaceGroup data as published in his textbook "Structure of Materials".
"""
import os
import re
import warnings
from abc import ABCMeta, abstractmethod
from collections.abc import Sequence
from fractions import Fraction
from itertools import product
import numpy as np
from monty.design_patterns import cached_class
from monty.serialization import loadfn
from pymatgen.core.operations import SymmOp
from pymatgen.util.string import Stringify
SYMM_DATA = None
def _get_symm_data(name):
    """Lazily load the bundled symmetry data file and return entry ``name``."""
    global SYMM_DATA
    if SYMM_DATA is None:
        # Load once per process; subsequent calls reuse the cached dict.
        data_path = os.path.join(os.path.dirname(__file__), "symm_data.json")
        SYMM_DATA = loadfn(data_path)
    return SYMM_DATA[name]
class SymmetryGroup(Sequence, Stringify, metaclass=ABCMeta):
    """
    Abstract base class representing a symmetry group.
    """

    @property
    @abstractmethod
    def symmetry_ops(self):
        """
        :return: List of symmetry operations
        """

    def __contains__(self, item):
        # Membership is by numerical closeness of affine matrices, not
        # object identity.
        return any(
            np.allclose(op.affine_matrix, item.affine_matrix)
            for op in self.symmetry_ops
        )

    def __hash__(self):
        return len(self)

    def __getitem__(self, item):
        return self.symmetry_ops[item]

    def __len__(self):
        return len(self.symmetry_ops)

    def is_subgroup(self, supergroup):
        """
        True if this group is a subgroup of the supplied group.

        Args:
            supergroup (SymmetryGroup): Supergroup to test.

        Returns:
            True if this group is a subgroup of the supplied group.
        """
        warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
        return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)

    def is_supergroup(self, subgroup):
        """
        True if this group is a supergroup of the supplied group.

        Args:
            subgroup (SymmetryGroup): Subgroup to test.

        Returns:
            True if this group is a supergroup of the supplied group.
        """
        warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
        return set(subgroup.symmetry_ops).issubset(self.symmetry_ops)

    def to_latex_string(self) -> str:
        """
        Returns:
            A latex formatted group symbol with proper subscripts and overlines.
        """
        # Wrap digit subscripts, then digits preceded by '-' as overlines.
        with_subscripts = re.sub(r"_(\d+)", r"$_{\1}$", self.to_pretty_string())
        return re.sub(r"-(\d)", r"$\\overline{\1}$", with_subscripts)
@cached_class
class PointGroup(SymmetryGroup):
    """
    A crystallographic point group, with generators and symmetry operations.

    .. attribute:: symbol

        Full International or Hermann-Mauguin Symbol.

    .. attribute:: generators

        List of generator matrices. Note that 3x3 matrices are used for Point
        Groups.

    .. attribute:: symmetry_ops

        Full set of symmetry operations as matrices.
    """

    def __init__(self, int_symbol):
        """
        Initializes a Point Group from its international symbol.

        Args:
            int_symbol (str): International or Hermann-Mauguin Symbol.
        """
        self.symbol = int_symbol
        # Look up the generator matrices encoded for this symbol.
        encoding = _get_symm_data("point_group_encoding")[int_symbol]
        matrices = _get_symm_data("generator_matrices")
        self.generators = [matrices[c] for c in encoding]
        full_ops = self._generate_full_symmetry_ops()
        self._symmetry_ops = {SymmOp.from_rotation_and_translation(m) for m in full_ops}
        self.order = len(self._symmetry_ops)

    @property
    def symmetry_ops(self):
        """
        :return: List of symmetry operations for SpaceGroup
        """
        return self._symmetry_ops

    def _generate_full_symmetry_ops(self):
        # Closure of the generators under matrix multiplication: keep
        # multiplying newly found operations against the running set until
        # no new operations appear.
        symm_ops = list(self.generators)
        frontier = self.generators
        while frontier:
            newly_found = []
            for g1, g2 in product(frontier, symm_ops):
                candidate = np.dot(g1, g2)
                if not in_array_list(symm_ops, candidate):
                    newly_found.append(candidate)
                    symm_ops.append(candidate)
            frontier = newly_found
        return symm_ops

    def get_orbit(self, p, tol=1e-5):
        """
        Returns the orbit for a point.

        Args:
            p: Point as a 3x1 array.
            tol: Tolerance for determining if sites are the same. 1e-5 should
                be sufficient for most purposes. Set to 0 for exact matching
                (and also needed for symbolic orbits).

        Returns:
            ([array]) Orbit for point.
        """
        orbit = []
        for op in self.symmetry_ops:
            candidate = op.operate(p)
            if not in_array_list(orbit, candidate, tol=tol):
                orbit.append(candidate)
        return orbit
@cached_class
class SpaceGroup(SymmetryGroup):
    """
    Class representing a SpaceGroup.

    .. attribute:: symbol

        Full International or Hermann-Mauguin Symbol.

    .. attribute:: int_number

        International number

    .. attribute:: generators

        List of generator matrices. Note that 4x4 matrices are used for Space
        Groups.

    .. attribute:: order

        Order of Space Group
    """

    # Class-level data, loaded once at class definition time: the tabulated
    # operator sets and the various symbol <-> encoding lookup tables.
    SYMM_OPS = loadfn(os.path.join(os.path.dirname(__file__), "symm_ops.json"))
    SG_SYMBOLS = set(_get_symm_data("space_group_encoding").keys())
    # Normalize tabulated symbols by stripping spaces and register them.
    for op in SYMM_OPS:
        op["hermann_mauguin"] = re.sub(r" ", "", op["hermann_mauguin"])
        op["universal_h_m"] = re.sub(r" ", "", op["universal_h_m"])
        SG_SYMBOLS.add(op["hermann_mauguin"])
        SG_SYMBOLS.add(op["universal_h_m"])
    gen_matrices = _get_symm_data("generator_matrices")
    # POINT_GROUP_ENC = SYMM_DATA["point_group_encoding"]
    sgencoding = _get_symm_data("space_group_encoding")
    abbrev_sg_mapping = _get_symm_data("abbreviated_spacegroup_symbols")
    translations = {k: Fraction(v) for k, v in _get_symm_data("translations").items()}
    full_sg_mapping = {v["full_symbol"]: k for k, v in _get_symm_data("space_group_encoding").items()}

    def __init__(self, int_symbol):
        """
        Initializes a Space Group from its full or abbreviated international
        symbol. Only standard settings are supported.

        Args:
            int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
                Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
                notation is a LaTeX-like string, with screw axes being
                represented by an underscore. For example, "P6_3/mmc".
                Alternative settings can be access by adding a ":identifier".
                For example, the hexagonal setting for rhombohedral cells can be
                accessed by adding a ":H", e.g., "R-3m:H". To find out all
                possible settings for a spacegroup, use the get_settings
                classmethod. Alternative origin choices can be indicated by a
                translation vector, e.g., 'Fm-3m(a-1/4,b-1/4,c-1/4)'.
        """
        int_symbol = re.sub(r" ", "", int_symbol)
        # Resolve abbreviated or full symbols to the canonical short symbol.
        if int_symbol in SpaceGroup.abbrev_sg_mapping:
            int_symbol = SpaceGroup.abbrev_sg_mapping[int_symbol]
        elif int_symbol in SpaceGroup.full_sg_mapping:
            int_symbol = SpaceGroup.full_sg_mapping[int_symbol]
        # First try the tabulated operator sets, which cover alternative
        # settings; fall back (for/else) to decoding the generator string.
        for spg in SpaceGroup.SYMM_OPS:
            if int_symbol in [spg["hermann_mauguin"], spg["universal_h_m"]]:
                ops = [SymmOp.from_xyz_string(s) for s in spg["symops"]]
                self.symbol = re.sub(r":", "", re.sub(r" ", "", spg["universal_h_m"]))
                if int_symbol in SpaceGroup.sgencoding:
                    self.full_symbol = SpaceGroup.sgencoding[int_symbol]["full_symbol"]
                    self.point_group = SpaceGroup.sgencoding[int_symbol]["point_group"]
                else:
                    self.full_symbol = re.sub(r" ", "", spg["universal_h_m"])
                    self.point_group = spg["schoenflies"]
                self.int_number = spg["number"]
                self.order = len(ops)
                self._symmetry_ops = ops
                break
        else:
            # No tabulated match: decode the compact generator encoding.
            if int_symbol not in SpaceGroup.sgencoding:
                raise ValueError(f"Bad international symbol {int_symbol}")
            data = SpaceGroup.sgencoding[int_symbol]
            self.symbol = int_symbol
            # TODO: Support different origin choices.
            # Encoding layout: inversion flag, generator count, then per
            # generator a matrix key followed by three translation keys.
            enc = list(data["enc"])
            inversion = int(enc.pop(0))
            ngen = int(enc.pop(0))
            symm_ops = [np.eye(4)]
            if inversion:
                symm_ops.append(np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]))
            for i in range(ngen):
                m = np.eye(4)
                m[:3, :3] = SpaceGroup.gen_matrices[enc.pop(0)]
                m[0, 3] = SpaceGroup.translations[enc.pop(0)]
                m[1, 3] = SpaceGroup.translations[enc.pop(0)]
                m[2, 3] = SpaceGroup.translations[enc.pop(0)]
                symm_ops.append(m)
            self.generators = symm_ops
            self.full_symbol = data["full_symbol"]
            self.point_group = data["point_group"]
            self.int_number = data["int_number"]
            self.order = data["order"]
            # Full operator list is generated lazily in `symmetry_ops`.
            self._symmetry_ops = None

    def _generate_full_symmetry_ops(self):
        # Closure of the generators under composition, with translations
        # wrapped into the unit cell (mod 1) after each product.
        symm_ops = np.array(self.generators)
        for op in symm_ops:
            op[0:3, 3] = np.mod(op[0:3, 3], 1)
        new_ops = symm_ops
        while len(new_ops) > 0 and len(symm_ops) < self.order:
            gen_ops = []
            for g in new_ops:
                temp_ops = np.einsum("ijk,kl", symm_ops, g)
                for op in temp_ops:
                    op[0:3, 3] = np.mod(op[0:3, 3], 1)
                    # Snap translations within 1e-5 of 1 back to 0.
                    ind = np.where(np.abs(1 - op[0:3, 3]) < 1e-5)
                    op[ind, 3] = 0
                    if not in_array_list(symm_ops, op):
                        gen_ops.append(op)
                        symm_ops = np.append(symm_ops, [op], axis=0)
            new_ops = gen_ops
        assert len(symm_ops) == self.order
        return symm_ops

    @classmethod
    def get_settings(cls, int_symbol):
        """
        Returns all the settings for a particular international symbol.

        Args:
            int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
                Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
                notation is a LaTeX-like string, with screw axes being
                represented by an underscore. For example, "P6_3/mmc".
        """
        symbols = []
        if int_symbol in SpaceGroup.abbrev_sg_mapping:
            symbols.append(SpaceGroup.abbrev_sg_mapping[int_symbol])
            int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
        elif int_symbol in SpaceGroup.full_sg_mapping:
            symbols.append(SpaceGroup.full_sg_mapping[int_symbol])
            int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
        else:
            # NOTE(review): if no entry matches here, `int_number` stays
            # unbound and the loop below raises UnboundLocalError for
            # unknown symbols -- confirm intended behavior.
            for spg in SpaceGroup.SYMM_OPS:
                if int_symbol in [
                    re.split(r"\(|:", spg["hermann_mauguin"])[0],
                    re.split(r"\(|:", spg["universal_h_m"])[0],
                ]:
                    int_number = spg["number"]
                    break
        # Collect every tabulated symbol sharing this international number.
        for spg in SpaceGroup.SYMM_OPS:
            if int_number == spg["number"]:
                symbols.append(spg["hermann_mauguin"])
                symbols.append(spg["universal_h_m"])
        return set(symbols)

    @property
    def symmetry_ops(self):
        """
        Full set of symmetry operations as matrices. Lazily initialized as
        generation sometimes takes a bit of time.
        """
        if self._symmetry_ops is None:
            self._symmetry_ops = [SymmOp(m) for m in self._generate_full_symmetry_ops()]
        return self._symmetry_ops

    def get_orbit(self, p, tol=1e-5):
        """
        Returns the orbit for a point.

        Args:
            p: Point as a 3x1 array.
            tol: Tolerance for determining if sites are the same. 1e-5 should
                be sufficient for most purposes. Set to 0 for exact matching
                (and also needed for symbolic orbits).

        Returns:
            ([array]) Orbit for point.
        """
        orbit = []
        for o in self.symmetry_ops:
            pp = o.operate(p)
            # Wrap fractional coordinates into [0, 1).
            pp = np.mod(np.round(pp, decimals=10), 1)
            if not in_array_list(orbit, pp, tol=tol):
                orbit.append(pp)
        return orbit

    def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
        """
        Checks whether a particular lattice is compatible with the
        *conventional* unit cell.

        Args:
            lattice (Lattice): A Lattice.
            tol (float): The tolerance to check for equality of lengths.
            angle_tol (float): The tolerance to check for equality of angles
                in degrees.
        """
        abc = lattice.lengths
        angles = lattice.angles
        crys_system = self.crystal_system

        # `None` entries in `ref` are unconstrained and skipped.
        def check(param, ref, tolerance):
            return all(abs(i - j) < tolerance for i, j in zip(param, ref) if j is not None)

        if crys_system == "cubic":
            a = abc[0]
            return check(abc, [a, a, a], tol) and check(angles, [90, 90, 90], angle_tol)
        # Trigonal groups in the hexagonal setting are checked against the
        # hexagonal cell; the listed international numbers have no
        # rhombohedral setting.
        if crys_system == "hexagonal" or (
            crys_system == "trigonal"
            and (
                self.symbol.endswith("H")
                or self.int_number
                in [
                    143,
                    144,
                    145,
                    147,
                    149,
                    150,
                    151,
                    152,
                    153,
                    154,
                    156,
                    157,
                    158,
                    159,
                    162,
                    163,
                    164,
                    165,
                ]
            )
        ):
            a = abc[0]
            return check(abc, [a, a, None], tol) and check(angles, [90, 90, 120], angle_tol)
        if crys_system == "trigonal":
            a = abc[0]
            alpha = angles[0]
            return check(abc, [a, a, a], tol) and check(angles, [alpha, alpha, alpha], angle_tol)
        if crys_system == "tetragonal":
            a = abc[0]
            return check(abc, [a, a, None], tol) and check(angles, [90, 90, 90], angle_tol)
        if crys_system == "orthorhombic":
            return check(angles, [90, 90, 90], angle_tol)
        if crys_system == "monoclinic":
            return check(angles, [90, None, 90], angle_tol)
        # Triclinic: any lattice is compatible.
        return True

    @property
    def crystal_system(self):
        """
        :return: Crystal system for space group.
        """
        # Standard partition of international numbers into crystal systems.
        i = self.int_number
        if i <= 2:
            return "triclinic"
        if i <= 15:
            return "monoclinic"
        if i <= 74:
            return "orthorhombic"
        if i <= 142:
            return "tetragonal"
        if i <= 167:
            return "trigonal"
        if i <= 194:
            return "hexagonal"
        return "cubic"

    def is_subgroup(self, supergroup):
        """
        True if this space group is a subgroup of the supplied group.

        Args:
            group (Spacegroup): Supergroup to test.

        Returns:
            True if this space group is a subgroup of the supplied group.
        """
        # Quick rejection: a subgroup cannot have more operations.
        if len(supergroup.symmetry_ops) < len(self.symmetry_ops):
            return False

        # Breadth-first walk of the maximal-subgroup graph, starting at the
        # supergroup, looking for this group's international number.
        groups = [[supergroup.int_number]]
        all_groups = [supergroup.int_number]
        max_subgroups = {int(k): v for k, v in _get_symm_data("maximal_subgroups").items()}
        while True:
            new_sub_groups = set()
            for i in groups[-1]:
                new_sub_groups.update([j for j in max_subgroups[i] if j not in all_groups])
            if self.int_number in new_sub_groups:
                return True

            if len(new_sub_groups) == 0:
                break

            groups.append(new_sub_groups)
            all_groups.extend(new_sub_groups)
        return False

    def is_supergroup(self, subgroup):
        """
        True if this space group is a supergroup of the supplied group.

        Args:
            subgroup (Spacegroup): Subgroup to test.

        Returns:
            True if this space group is a supergroup of the supplied group.
        """
        return subgroup.is_subgroup(self)

    @classmethod
    def from_int_number(cls, int_number, hexagonal=True):
        """
        Obtains a SpaceGroup from its international number.

        Args:
            int_number (int): International number.
            hexagonal (bool): For rhombohedral groups, whether to return the
                hexagonal setting (default) or rhombohedral setting.

        Returns:
            (SpaceGroup)
        """
        sym = sg_symbol_from_int_number(int_number, hexagonal=hexagonal)
        # The listed numbers are the rhombohedral (R-centered) groups,
        # which need the ":R" setting suffix when not hexagonal.
        if not hexagonal and int_number in [146, 148, 155, 160, 161, 166, 167]:
            sym += ":R"
        return SpaceGroup(sym)

    def __str__(self):
        return "Spacegroup %s with international number %d and order %d" % (
            self.symbol,
            self.int_number,
            len(self.symmetry_ops),
        )

    def to_pretty_string(self):
        """
        :return: Spacegroup string.
        """
        return self.symbol
def sg_symbol_from_int_number(int_number, hexagonal=True):
    """
    Obtains a SpaceGroup name from its international number.

    Args:
        int_number (int): International number.
        hexagonal (bool): For rhombohedral groups, whether to return the
            hexagonal setting (default) or rhombohedral setting.

    Returns:
        (str) Spacegroup symbol
    """
    # Collect every encoded symbol that maps to this international number.
    syms = [
        symbol
        for symbol, entry in _get_symm_data("space_group_encoding").items()
        if entry["int_number"] == int_number
    ]
    if not syms:
        raise ValueError("Invalid international number!")
    if len(syms) == 2:
        # Prefer a symbol containing "e"; otherwise pick the requested
        # (hexagonal or rhombohedral) setting by its "H" suffix.
        for sym in syms:
            if "e" in sym:
                return sym
        if hexagonal:
            syms = [s for s in syms if s.endswith("H")]
        else:
            syms = [s for s in syms if not s.endswith("H")]
    return syms.pop()
def in_array_list(array_list, a, tol=1e-5):
    """
    Extremely efficient nd-array comparison using numpy's broadcasting. This
    function checks if a particular array a, is present in a list of arrays.
    It works for arrays of any size, e.g., even matrix searches.

    Args:
        array_list ([array]): A list of arrays to compare to.
        a (array): The test array for comparison.
        tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
            done.

    Returns:
        (bool)
    """
    if len(array_list) == 0:
        return False
    # Reduce over every axis of `a` (axis 0 indexes the list entries).
    entry_axes = tuple(range(1, a.ndim + 1))
    if not tol:
        exact_matches = np.all(np.equal(array_list, a[None, :]), entry_axes)
        return np.any(exact_matches)
    deviations = np.sum(np.abs(array_list - a[None, :]), entry_axes)
    return np.any(deviations < tol)
|
materialsproject/pymatgen
|
pymatgen/symmetry/groups.py
|
Python
|
mit
| 19,338
|
[
"CRYSTAL",
"pymatgen"
] |
8d32b1cfec0a7cb1e3347fb2a829b8f183c80087bb19a16ec0ac3d0741308f03
|
"""Detect viral infections via bwa alignment of unaligned reads.
This is primarily useful for cancer samples where viral infection can
inform treatment.
"""
import glob
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import vcfutils
def run(bam_file, data, out_dir):
    """Run viral QC analysis.

    For paired (tumor/normal) samples, re-aligns the unmapped reads of
    `bam_file` against the pre-installed "gdc-viral" reference with bwa and
    writes a tab-separated per-contig count of aligned reads. Returns a dict
    with key "base" pointing at the counts file when run, otherwise an
    empty dict.
    """
    viral_target = "gdc-viral"
    out = {}
    # Only applies to somatic (paired tumor/normal) analyses.
    if vcfutils.get_paired_phenotype(data):
        # Locate the pre-installed gdc-viral FASTA among the viral files.
        viral_refs = [x for x in dd.get_viral_files(data) if os.path.basename(x) == "%s.fa" % viral_target]
        if viral_refs and utils.file_exists(viral_refs[0]):
            viral_ref = viral_refs[0]
            viral_bam = os.path.join(utils.safe_makedir(out_dir),
                                     "%s-%s.bam" % (dd.get_sample_name(data),
                                                    utils.splitext_plus(os.path.basename(viral_ref))[0]))
            out_file = "%s-counts.txt" % utils.splitext_plus(viral_bam)[0]
            # Skip work that is already up to date relative to the input BAM.
            if not utils.file_uptodate(out_file, bam_file):
                if not utils.file_uptodate(viral_bam, bam_file):
                    with file_transaction(data, viral_bam) as tx_out_file:
                        cores = dd.get_num_cores(data)
                        tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
                        # Pipeline: extract unmapped reads (SAM FLAG 0x4),
                        # convert to FASTQ, align to the viral reference with
                        # bwa mem, then sort and index the result.
                        cmd = ("samtools view -u -f 4 {bam_file} | "
                               "bamtofastq collate=0 | "
                               "bwa mem -t {cores} {viral_ref} - | "
                               "bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
                               "inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
                        do.run(cmd.format(**locals()), "Compare unmapped reads to viral genome")
                with file_transaction(data, out_file) as tx_out_file:
                    with open(tx_out_file, "w") as out_handle:
                        out_handle.write("# sample\t%s\n" % dd.get_sample_name(data))
                        # Report only viral contigs with at least one aligned read.
                        for info in bam.idxstats(viral_bam, data):
                            if info.aligned > 0:
                                out_handle.write("%s\t%s\n" % (info.contig, info.aligned))
            out["base"] = out_file
    return out
def get_files(data):
    """Retrieve pre-installed viral reference files.
    """
    # The viral directory sits alongside the genome reference directory.
    ref_dir = os.path.dirname(dd.get_ref_file(data))
    pattern = os.path.normpath(os.path.join(ref_dir, os.pardir, "viral", "*"))
    return sorted(glob.glob(pattern))
|
biocyberman/bcbio-nextgen
|
bcbio/qc/viral.py
|
Python
|
mit
| 2,676
|
[
"BWA"
] |
73f9db1ce9e64b90e3b95dd9bf95d87bb17a2f9c4c37d5e4f33b97f3f4a226d4
|
""" Example smearing script
This script:
* Reads in mc spectra from hdf5
* Smears spectra, default is to use weighted Gaussian method, but can
also use specify random Gaussian method via command line
* Smeared spectrum is saved to the same directory with ``_#value#ly`` or
``_#value#rs`` added to the file name
Examples:
To smear hdf5 file ``example.hdf5`` using the random Gaussian method::
$ python dump_smeared.py --smear_method "random" /path/to/example.hdf5
This will create the smeared hdf5 file ``/path/to/example_200ly.hdf5``.
.. note:: Valid smear methods include:
* "weight", default
* "random"
"""
import echidna.output.store as store
import echidna.core.smear as smear
import echidna.utilities as utilities
import os
_logger = utilities.start_logging()
def main(args):
    """Smears energy and dumps spectra.

    Args:
      args (Namespace): Container for arguments. See::

        $ python dump_smeared_energy.py -h

    Raises:
      ValueError: If destination directory does not exist, or if the
        requested smear method is not recognised.
    """
    if args.dest:
        if os.path.isdir(args.dest):
            directory = args.dest
            if directory[-1] != "/":
                directory += "/"
        else:
            raise ValueError("%s does not exist" % args.dest)
    else:
        directory = os.path.dirname(args.path)  # strip filename
        if directory:
            directory += '/'
        else:
            directory = './'
    # strip directory and extension
    filename = os.path.splitext(os.path.basename(args.path))[0]
    # Choose smearing class: resolution-based if requested, light-yield
    # based otherwise; ``--gaus`` selects the non-Poisson variant.
    if args.energy_resolution:
        if args.gaus:
            energy_smear = smear.EnergySmearRes(poisson=False)
        else:
            energy_smear = smear.EnergySmearRes(poisson=True)
        energy_smear.set_resolution(args.energy_resolution)
    else:  # use light yield
        if args.gaus:
            energy_smear = smear.EnergySmearLY(poisson=False)
        else:
            energy_smear = smear.EnergySmearLY(poisson=True)
        energy_smear.set_resolution(args.light_yield)
    spectrum = store.load(args.path)
    # NOTE(review): assumes the spectrum config contains at least one
    # parameter with "energy" in its name; otherwise energy_par is
    # unbound -- TODO confirm against echidna spectrum configs.
    if args.smear_method == "weight":  # Use default smear method
        for par in spectrum.get_config().get_pars():
            if "energy" in par:
                energy_par = par
        spectrum = energy_smear.weighted_smear(spectrum,
                                               par=energy_par)
    elif args.smear_method == "random":
        for par in spectrum.get_config().get_pars():
            if "energy" in par:
                energy_par = par
        spectrum = energy_smear.random_smear(spectrum,
                                             par=energy_par)
    else:  # Not a valid smear method
        # BUG FIX: the original called ``parser.error(...)`` here, but
        # ``parser`` is only defined inside the ``__main__`` block, so
        # this raised NameError whenever main() was imported and called.
        # Raise ValueError instead, matching the documented behaviour.
        raise ValueError(args.smear_method + " is not a valid smear method")
    if args.energy_resolution:
        str_rs = str(args.energy_resolution)
        filename = directory + filename + "_" + str_rs + "rs.hdf5"
    else:
        str_ly = str(args.light_yield)
        if str_ly[-2:] == '.0':  # drop trailing ".0" for tidy filenames
            str_ly = str_ly[:-2]
        filename = directory + filename + "_" + str_ly + "ly.hdf5"
    store.dump(filename, spectrum)
if __name__ == "__main__":
    import argparse
    # Command-line interface; see the module docstring for usage examples.
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--smear_method", nargs='?', const="weight",
                        type=str, default="weight",
                        help="specify the smearing method to use")
    parser.add_argument("-r", "--energy_resolution", default=None, type=float,
                        help="specify energy resolution "
                        "e.g. 0.05 for 5 percent")
    # NOTE(review): the concatenated help string below is missing a space
    # before "e.g." -- it renders as "specify light yielde.g. ...".
    parser.add_argument("-l", "--light_yield", default=200., type=float,
                        help="specify light yield"
                        "e.g. 200 for 200 NHit/MeV")
    parser.add_argument("-g", "--gaus", dest="gaus", action="store_true",
                        help="Apply gaussian PDF")
    parser.add_argument("-d", "--dest", default=None, type=str,
                        help="specify destination directory")
    parser.add_argument("path", type=str,
                        help="specify path to hdf5 file")
    parser.set_defaults(gaus=False)
    args = parser.parse_args()
    try:
        main(args)
    except Exception:
        # Log the full traceback through echidna's logger instead of
        # letting the interpreter print it raw.
        _logger.exception("echidna terminated because of the following error.")
|
jwaterfield/echidna
|
echidna/scripts/dump_smeared_energy.py
|
Python
|
mit
| 4,361
|
[
"Gaussian"
] |
f692a4b1262206df7d720667891446b90d9d33674235cb5620287398fad3128f
|
#from __future__ import division, print_function
#from __future__ import absolute_import
#from __future__ import unicode_literals
from configparser import ConfigParser
from sys import platform, stdout
from os import path, sep, listdir, access, X_OK, environ, pathsep
# from pkg_resources import resource_stream, resource_filename
from pysces import output_dir
from warnings import warn
_DEFAULT_CONFIG = {'Settings': {
'maxima_path': 'C:\\maxima?\\bin\\maxima.bat'}}
_DEFAULT_CONF_NAME = 'default_config.ini'
_USER_CONF_PATH = path.join(output_dir, 'psctb_config.ini')
class ConfigReader:
    """Lazily loads and caches the merged default + user configuration.

    The configuration is exposed as a flat dict (see ``_compose_config``)
    and is created on first ``get_config()`` call. Note that this class
    reaches into ``ConfigParser``'s private ``_sections`` attribute to
    seed defaults -- fragile, but relied on throughout.
    """

    # Cached config dict; None until _setup_config() has run.
    _config = None

    @classmethod
    def reload_config(cls):
        """Drop the cached config so the next get_config() re-reads it."""
        cls._config = None

    @classmethod
    def get_config(cls):
        """Return the cached config dict, building it on first use."""
        if not cls._config:
            cls._setup_config()
        return cls._config

    @classmethod
    def _setup_config(cls):
        """Build the config: read/validate user file, then locate Maxima."""
        default_conf = ConfigParser()
        # Seed defaults by writing directly into the parser's internals.
        default_conf._sections = _DEFAULT_CONFIG
        try:
            if not path.exists(_USER_CONF_PATH):
                # First run: materialise a user config file from defaults.
                ConfigWriter.write_config(_DEFAULT_CONFIG, _USER_CONF_PATH)
                user_conf = ConfigParser()
                user_conf._sections = _DEFAULT_CONFIG
            else:
                user_conf = cls._read_config(_USER_CONF_PATH)
            ConfigChecker.check_config('user configuration',
                                       _USER_CONF_PATH,
                                       user_conf._sections)
        except MissingSection as e:
            # Whole section missing: ignore the user file entirely.
            solution = ('falling back to default configuration.'
                        '\nDelete configuration file to restore defaults')
            ConfigChecker.warn_user(e, solution)
            user_conf = ConfigParser()
            user_conf.add_section('Settings')
        except MissingSetting as e:
            # Individual setting missing: defaults will fill the gap in
            # _compose_config below.
            solution = ('falling back to default configuration.'
                        '\nSpecify setting to avoid using defaults')
            ConfigChecker.warn_user(e, solution)
        cls._config = cls._compose_config(default_conf, user_conf)
        try:
            if platform == 'win32':
                # Expand the wildcard path (e.g. C:\maxima?\bin\...) and
                # pick the highest-sorting match (newest version).
                maxima_path_list = PathFinder.find_path_to(
                    cls._config['maxima_path'])
                if not len(maxima_path_list) > 0:
                    raise IOError(2,
                                  'No valid path to specified file',
                                  cls._config['maxima_path'])
                maxima_path = sorted(maxima_path_list)[-1]
                cls._config['maxima_path'] = maxima_path
            else:
                # On POSIX, just require that `maxima` is on $PATH.
                maxima_path = PathFinder.which('maxima')
                if not maxima_path:
                    raise IOError(2,
                                  'Maxima not installed',
                                  'command not found')
                cls._config['maxima_path'] = 'maxima'
        except IOError as e:
            solution = ('Please check that configuration file specifies '
                        'the correct path for Maxima and '
                        'that Maxima is installed correctly before '
                        'attempting to generate new results with SymCA '
                        '(see documentation for details).')
            ConfigChecker.warn_user(e, solution)
            # None marks Maxima as unavailable to downstream code.
            cls._config['maxima_path'] = None
        cls._config['platform'] = platform
        cls._config['stdout'] = stdout

    @staticmethod
    def _read_config(file_or_path):
        """Parse *file_or_path* (a path string or an open file object)."""
        conf = ConfigParser()
        if type(file_or_path) is str:
            conf.read(file_or_path)
        else:
            conf.readfp(file_or_path)
        return conf

    @staticmethod
    def _compose_config(default_conf, user_conf):
        """Merge the 'Settings' sections; user values override defaults."""
        conf_dict = {}
        conf_dict.update(default_conf._sections['Settings'])
        conf_dict.update(user_conf._sections['Settings'])
        if '__name__' in conf_dict:
            # ConfigParser internals may leave a '__name__' key behind.
            conf_dict.pop('__name__')
        return conf_dict
class ConfigChecker:
    """Validates configuration dicts against ``_DEFAULT_CONFIG`` and
    reports problems to the user via ``warnings.warn``."""

    @staticmethod
    def _has_all_sections(config_name, config_path, config_dict):
        """Raise MissingSection if a required section is absent."""
        error_string = ('The {config_name} located at\n{config_path}\n'
                        'does not contain the required section\n'
                        '"{section}".')
        for section in list(_DEFAULT_CONFIG.keys()):
            if section not in config_dict:
                raise MissingSection(
                    error_string.format(**locals()))

    @staticmethod
    def _has_all_settings(config_name, config_path, config_dict):
        """Raise MissingSetting if a required setting is absent."""
        error_string = ('The {config_name} located at\n{config_path}\n'
                        'does not contain the required setting\n'
                        '"{setting}"\nunder the section\n"{section}".')
        for section in list(_DEFAULT_CONFIG.keys()):
            for setting in list(_DEFAULT_CONFIG[section].keys()):
                if setting not in config_dict[section]:
                    raise MissingSetting(
                        error_string.format(**locals()))

    @staticmethod
    def check_config(config_name, config_path, config_dict):
        """Run both the section and the setting checks on *config_dict*."""
        ConfigChecker._has_all_sections(config_name, config_path, config_dict)
        ConfigChecker._has_all_settings(config_name, config_path, config_dict)

    @staticmethod
    def _get_exception_name(exception):
        """Return the bare class name of *exception*.

        BUG FIX: the original parsed ``str(exception.__class__)`` and
        returned e.g. ``"<class 'OSError"`` for any class without a
        dotted module path; ``type(...).__name__`` is correct for all.
        """
        return type(exception).__name__

    @staticmethod
    def warn_user(exception, solution):
        """Warn about *exception*, appending the suggested *solution*."""
        warning_string = ('\n\n'
                          'The following error was encountered:\n"{message}"'
                          '\n\n{solution}')
        # BUG FIX: Python 3 exceptions have no ``.message`` attribute and
        # ``OSError(2, ...)`` actually instantiates FileNotFoundError, so
        # the original ``type(exception) is IOError`` test missed it and
        # then crashed on ``exception.message``. Use isinstance and fall
        # back to str(exception) for everything else.
        if (isinstance(exception, OSError) and exception.strerror
                and exception.filename):
            detail = exception.strerror + ':\n' + exception.filename
        else:
            detail = str(exception)
        message = ConfigChecker._get_exception_name(exception) + ' - ' + detail
        warn(warning_string.format(**locals()))
class ConfigWriter:
    """Serialises a ``{section: {setting: value}}`` dict to an INI file."""

    @staticmethod
    def write_config(config_dict, config_path):
        """Write *config_dict* to *config_path* in INI format."""
        parser = ConfigParser()
        for section_name, settings in config_dict.items():
            parser.add_section(section_name)
            for option, value in settings.items():
                parser.set(section_name, option, value)
        with open(config_path, 'w') as out_file:
            parser.write(out_file)
class MissingSetting(Exception):
    """Raised when a required setting is absent from a config section."""
    pass
class MissingSection(Exception):
    """Raised when a required section is absent from a config file."""
    pass
class PathFinder:
    """Helpers for locating executables and resolving wildcard paths."""

    @staticmethod
    def find_path_to(wildcard_path):
        """Expand *wildcard_path* into a list of existing paths.

        A component containing ``?`` (e.g. ``maxima?``) acts as a
        substring wildcard: any directory entry containing the component
        text (minus the ``?``) matches, so several versioned installs
        can all be found. Components without ``?`` must exist exactly.
        Returns the list of fully-resolved candidate paths (possibly
        empty).
        """
        path_parts = wildcard_path.split(sep)
        if platform == 'win32':
            # Keep the drive specifier (e.g. "C:") as the search root.
            new_paths = [path_parts.pop(0) + sep]
        else:
            new_paths = [sep]
        for i, path_part in enumerate(path_parts):
            if '?' in path_part:
                # NOTE(review): assumes the '?' is the *last* character
                # of the component -- only the final char is stripped.
                path_part = path_part[:-1]
                new_new_paths = []
                for each_base in new_paths:
                    possible_matches = PathFinder.find_match(each_base,
                                                             path_part)
                    if len(possible_matches) > 0:
                        for match in possible_matches:
                            new_new_paths.append(path.join(each_base, match))
                new_paths = new_new_paths
            else:
                # Exact component: keep only bases where it exists.
                new_new_paths = []
                for each_path in new_paths:
                    new_path = path.join(each_path, path_part)
                    if path.exists(new_path):
                        new_new_paths.append(new_path)
                new_paths = new_new_paths
        return new_paths

    @staticmethod
    def find_match(base_dir, to_match):
        """Return entries of *base_dir* whose name contains *to_match*.

        An unreadable/missing *base_dir* yields no matches rather than
        an error.
        """
        matches = []
        try:
            dirs_in_basedir = listdir(base_dir)
        except OSError:
            dirs_in_basedir = []
        for directory in dirs_in_basedir:
            if to_match in directory:
                matches.append(directory)
        return matches

    @staticmethod
    def which(program):
        """Locate *program* like the POSIX ``which`` command.

        Returns the full path of the first executable match on $PATH
        (or *program* itself if it is already a path to an executable),
        or None if nothing executable is found.
        """
        def is_exe(fpath):
            return path.isfile(fpath) and access(fpath, X_OK)

        fpath, fname = path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for epath in environ["PATH"].split(pathsep):
                epath = epath.strip('"')
                exe_file = path.join(epath, program)
                if is_exe(exe_file):
                    return exe_file
        return None
|
PySCeS/PyscesToolbox
|
psctb/utils/config.py
|
Python
|
bsd-3-clause
| 8,339
|
[
"PySCeS"
] |
4d414a91522394dd03c4f5ae416968ffa529da7700837b5c9300a9e260517c65
|
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation includes CLOCK-DRIVEN synapses, for direct
comparison to DynaSim's clock-driven architecture. The synaptic connections
are "low-density", with only a 2% probability of connection.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_COBAHH_clocksyn_lodens_128/pbsout/brian_benchmark_COBAHH_clocksyn_lodens_128.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319 in order
to work with version 2 of the Brian simulator (aka Brian2), and also modified
to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 128
defaultclock.dt = 0.01*ms
# Membrane / conductance parameters, scaled by membrane area.
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# Synaptic strengths
gAMPA = (0.1*msiemens*cmetre**-2)* area
gGABAA = (0.06*msiemens*cmetre**-2)* area
# Synaptic time constants
# NOTE(review): tauAMPA/tauGABAA are never referenced below -- the decay
# constants are hard-coded as (2*ms) and (5*ms) in the synapse models.
tauAMPA = 2
tauGABAA = 5
# Synaptic reversal potentials
EAMPA = 1*mV
EGABAA = -80*mV
# The model: Hodgkin-Huxley dynamics plus summed AMPA/GABA-A currents.
eqs = Equations('''
dv/dt = (gl*(El-v)-
gAMPA/cells*sAMPAtotal*(v-EAMPA)-
gGABAA/cells*sGABAAtotal*(v-EGABAA)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
sAMPAtotal : 1
sGABAAtotal : 1
''')
# Construct intrinsic cells: 80% excitatory (Pe), 20% inhibitory (Pi).
P = NeuronGroup(cells, model=eqs, method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Contruct synaptic network: clock-driven synapses, 2% ("low density")
# connection probability, gating variable summed onto the target.
sAMPA=Synapses(Pe,P,
model='''ds/dt=1000.*5.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - (s)/(2*ms) : 1 (clock-driven)
sAMPAtotal_post = s : 1 (summed)
''')
sAMPA.connect(p=0.02)
sGABAA_RETC=Synapses(Pi,P,
model='''ds/dt=1000.*2.*(1 + tanh(v_pre/(4.*mV)))*(1-s)/ms - s/(5*ms) : 1 (clock-driven)
sGABAAtotal_post = s : 1 (summed)
''')
sGABAA_RETC.connect(p=0.02)
# Initialization: membrane potentials jittered around rest.
P.v = 'El + (randn() * 5 - 5)*mV'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# # If you want to plot:
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# # If you want to save data:
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
output/Brian2/brian2_benchmark_COBAHH_clocksyn_lodens_0128/brian2_benchmark_COBAHH_clocksyn_lodens_0128.py
|
Python
|
gpl-3.0
| 3,778
|
[
"Brian"
] |
1049bd588e71d8f30fd29d20aa4c6b9999fa68b6cb11e9391b8e9fc85cc872cf
|
import json
import sys
import os.path
from os.path import expanduser
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Signature import PKCS1_PSS
#Author: Brian Greenberg 6/29/17
#JSON based config generator for groups to be used in conjunction with iptables like rules
#for an OpenFlow based switch controller
# Bail out early on malformed command lines (at most: command + filename).
if len(sys.argv) > 3:
    print "Too many arguments, use the -h option to see how to use this"
    sys.exit()
# Recognised command-line aliases, keyed by canonical command name.
commands = {"new":["-n","--n","--new","-new"],"modify":["-m","--m","--modify","-modify","-mod","--mod"],"verify":["-v","--v","--verify","-verify"],"help":["-h","--h","-help","--help"]}
def searchCmd(find):
    """Return the canonical command name whose alias list contains
    *find*, or None when the argument is not a recognised command."""
    for command, aliases in commands.items():
        if find in aliases:
            return command
    return None
# Every command except "help" requires a group-file name argument.
if len(sys.argv) <= 2 and (searchCmd(sys.argv[1]) != "help"):
    print "Missing argument"
    sys.exit()
outputFileName = ""
if len(sys.argv) > 2:
    outputFileName = sys.argv[2]
configData = {}
# (The comment below describes searchCmd, defined above.)
#Find if an argument to the script is a member of the recognized command list
#and if so return the command type. Simplifies having variations of the same command,
#such as --new vs -n for creating a new config file.
def getGroupInput(inputDict):
    """Interactively collect one group's IPv4s, subnets and FQDNs.

    Entries are comma-separated on input. The group is stored under its
    name in inputDict, which is mutated in place and also returned.
    """
    groupName = raw_input("What will this group be called? ")
    #TODO: Check for valid IPv4 entries
    IPv4list = raw_input("Enter the Ipv4 address you want in the group: ")
    IPv4list = IPv4list.split(",")
    #TODO: Check for valid Subnets
    SubNetlist = raw_input("Enter the Ipv4 subnets you want in the group: ")
    SubNetlist = SubNetlist.split(",")
    #TODO: Check for valid FQDNs
    FQDNlist = raw_input("Enter the domain names you want in the group: ")
    FQDNlist = FQDNlist.split(",")
    inputDict[groupName] = {"IPv4":IPv4list,"Subnet":SubNetlist,"FQDN":FQDNlist}
    return inputDict
def makeNewConfig(inputDict,outputName):
    """Interactively build a group config, write <outputName>.conf as
    pretty-printed JSON, then sign it via sigGen()."""
    #Generate new json config file
    addNewGroup = True
    print "Making new config file",outputName+".conf."
    while addNewGroup:
        addGroup = raw_input("Do you want to add a new group?[y/n]: ")
        if addGroup == 'y':
            getGroupInput(inputDict)
        else:
            addNewGroup = False
    outputConf = json.JSONEncoder(sort_keys=True,indent=2).encode(inputDict)
    outputFile = open(outputName+".conf",'w')
    outputFile.write(outputConf)
    outputFile.close()
    #Generate signature file
    sigGen(outputName)
    print "Group config and signature file have been written in the current directory"
def sigGen(fileName):
    """Sign <fileName>.conf with an RSA private key (PKCS1-PSS over
    SHA256) and write the raw signature to <fileName>.sig.

    Defaults to ~/.ssh/id_rsa when no key path is entered.
    """
    privKeyLoc = raw_input("Enter the full path of the location of the private key you want to use to sign the group file (Default is ~/.ssh/id_rsa): ")
    if privKeyLoc == '':
        privKeyLoc = expanduser("~")+"/.ssh/id_rsa"
    privKey = RSA.importKey(open(privKeyLoc,'r').read())
    hash = SHA256.new(open(fileName+".conf",'r').read())
    signer = PKCS1_PSS.new(privKey)
    sigFile = open(fileName+".sig",'w')
    signature = signer.sign(hash)
    sigFile.write(signature)
    sigFile.close()
def verifyConfig(fileName):
    """Verify <fileName>.sig against <fileName>.conf with an RSA public
    key (default ~/.ssh/id_rsa.pub) and print the result."""
    publicKeyLoc = raw_input("Enter the full path of the location of the public key you want to use to verify the group file (Default is ~/.ssh/id_rsa.pub): ")
    if publicKeyLoc == '':
        publicKeyLoc = expanduser("~")+"/.ssh/id_rsa.pub"
    pubKey = RSA.importKey(open(publicKeyLoc,'r').read())
    hash = SHA256.new(open(fileName+".conf",'r').read())
    if(os.path.isfile(fileName+".sig") == False):
        print "Could not find a signature file in this directory, be sure it has the same name as the group file"
        sys.exit()
    signature = open(fileName+".sig").read()
    print "Verifying signature using ",fileName+".sig."
    verifier = PKCS1_PSS.new(pubKey)
    if verifier.verify(hash,signature):
        print "Signature for config file is authentic."
    else:
        print "Signature not authentic, config file could have been modifed!"
def modifyConfig(fileName):
    """Interactively edit <fileName>.conf group by group; on confirmation
    the file is rewritten and re-signed via sigGen()."""
    file = open(fileName+".conf",'r')
    inputData = json.JSONDecoder().decode(file.read())
    file.close()
    print "Modifying group file",fileName+".conf","."
    for group in inputData:
        for data in inputData[group]:
            print "In the ",group," group, the allowed ",data," is ",json.dumps(inputData[group][data],indent=2),"\n"
            isChange = raw_input("Do you want to change this? [y/n] ")
            if isChange == 'y':
                # Replacement values are entered comma-separated.
                newData = raw_input("What should it be changed to?")
                inputData[group][data] = newData.split(",")
        modNextGroup = raw_input("Group has been modified, do you want to change the next one?[y/n] ")
        if modNextGroup != 'y':
            break
    newConfigData = json.JSONEncoder(sort_keys=True,indent=2).encode(inputData)
    print "The changed config file is:\n",newConfigData
    writeChanges = raw_input("Do you want to write these changes?[y/n] ")
    if writeChanges == 'y':
        newConfigFile = open(fileName+".conf",'w')
        newConfigFile.write(newConfigData)
        newConfigFile.close()
        # Re-sign: the old signature no longer matches the new content.
        sigGen(fileName)
    else:
        print "Changes not written"
# Dispatch on the canonical command resolved from argv[1].
if (searchCmd(sys.argv[1]) == "new"):
    modConf = 'n'
    if (os.path.isfile(outputFileName+".conf") == True):
        # Don't clobber an existing config without confirmation.
        modConf = raw_input("There is an existing config file with this name in this directory, do you want to overwrite it? [y/n] ")
        if modConf == 'y':
            makeNewConfig(configData,outputFileName)
        else:
            sys.exit()
    else:
        makeNewConfig(configData,outputFileName)
elif(searchCmd(sys.argv[1]) == "modify"):
    modifyConfig(outputFileName)
elif(searchCmd(sys.argv[1]) == "verify"):
    verifyConfig(outputFileName)
elif(searchCmd(sys.argv[1]) == "help"):
    print "Usage: ConfigGen [options] [name of group file]"
    print "Options: -m/-modify, takes an existing group file and allows modification"
    print "         -n/-new, make a new group file"
    print "         -v/-verify, make sure the signature matches the group file"
    print "         -h/-help, display this text"
elif(searchCmd(sys.argv[1]) == None):
    print "Command not recognized"
    sys.exit()
|
bgreenb/FireWall
|
GroupGen.py
|
Python
|
mit
| 5,703
|
[
"Brian"
] |
72a91d6561852c697c7317b5c4ac23825eaf1e312ac6ac3665e95d1ad5f08776
|
import os.path as op
import shutil
import unittest
import numpy as np
import numpy.testing as npt
from scipy.stats import truncnorm
import pyhrf
from pyhrf import Condition
import pyhrf.sandbox.physio as phy
class SimulationTest(unittest.TestCase):
    """Unit tests for pyhrf.sandbox.physio simulation helpers."""

    def setUp(self):
        # called before any unit test of the class
        # BUG FIX: the original hard-coded a developer-specific path
        # ('/home/tom/...') and never cleaned it; use a fresh temporary
        # folder and remove it in tearDown.
        self.tmp_path = pyhrf.get_tmp_path()  # create a temporary folder
        self.clean_tmp = True

    def tearDown(self):
        # called after any unit test of the class
        if self.clean_tmp:
            pyhrf.verbose(1, 'cleaning temporary folder ...')
            shutil.rmtree(self.tmp_path)

    def test_simulate_asl_full_physio(self):
        pyhrf.verbose.set_verbosity(0)
        r = phy.simulate_asl_full_physio()
        # let's just test the shapes of objects and the presence of some
        # physio-specific simulation items
        item_names = r.keys()
        self.assertIn('perf_stim_induced', item_names)
        self.assertIn('flow_induction', item_names)
        self.assertIn('perf_stim_induced', item_names)
        self.assertEqual(r['labels_vol'].shape, (3,1,2,2)) #cond, spatial axes
        self.assertEqual(r['bold'].shape, (297, 4)) #nb scans, flat spatial axis

    def test_simulate_asl_full_physio_outputs(self):
        pyhrf.verbose.set_verbosity(1)
        phy.simulate_asl_full_physio(self.tmp_path)

        def makefn(fn):
            return op.join(self.tmp_path, fn)

        self.assertTrue(op.exists(makefn('flow_induction.nii')))
        self.assertTrue(op.exists(makefn('neural_efficacies_audio.nii')))

    def test_simulate_asl_physio_rfs(self):
        pyhrf.verbose.set_verbosity(0)
        r = phy.simulate_asl_physio_rfs()
        # let's just test the shapes of objects and the presence of some
        # physio-specific simulation items
        item_names = r.keys()
        self.assertIn('perf_stim_induced', item_names)
        self.assertIn('primary_brf', item_names)
        self.assertIn('perf_stim_induced', item_names)
        self.assertEqual(r['labels_vol'].shape, (3,1,2,2)) #cond, spatial axes
        self.assertEqual(r['bold'].shape, (321, 4)) #nb scans, flat spatial axis
        #TODO: nb scans in final BOLD should be defined by the session length
        #      from the paradigm -> 297 instead of 321

    def test_create_tbg_neural_efficacies(self):
        """ Test the generation of neural efficacies from a truncated
        bi-Gaussian mixture
        """
        m_act = 5.
        v_act = .05
        v_inact = .05
        cdef = [Condition(m_act=m_act, v_act=v_act, v_inact=v_inact)]
        npos = 5000
        labels = np.zeros((1,npos), dtype=int)
        # // keeps the slice index an int under Python 3 as well
        labels[0, :npos//2] = 1
        phy_params = phy.PHY_PARAMS_FRISTON00
        ne = phy.create_tbg_neural_efficacies(phy_params, cdef, labels)
        #check shape consistency:
        self.assertEqual(ne.shape, labels.shape)
        #check that moments are close to theoretical ones
        ne_act = ne[0, np.where(labels[0])]
        ne_inact = ne[0, np.where(labels[0]==0)]
        m_act_theo = truncnorm.mean(0, phy_params['eps_max'], loc=m_act,
                                    scale=v_act**.5)
        v_act_theo = truncnorm.var(0, phy_params['eps_max'], loc=m_act,
                                   scale=v_act**.5)
        # BUG FIX: this line was a bare tuple expression (no-op); the
        # mean assertion parallel to the inactive case was intended.
        npt.assert_approx_equal(ne_act.mean(), m_act_theo, significant=2)
        npt.assert_approx_equal(ne_act.var(), v_act_theo, significant=2)
        m_inact_theo = truncnorm.mean(0, phy_params['eps_max'], loc=0.,
                                      scale=v_inact**.5)
        v_inact_theo = truncnorm.var(0, phy_params['eps_max'], loc=0.,
                                     scale=v_inact**.5)
        npt.assert_approx_equal(ne_inact.mean(), m_inact_theo, significant=2)
        npt.assert_approx_equal(ne_inact.var(), v_inact_theo, significant=2)
        # BUG FIX: the upper-bound check compared against the phy_params
        # dict itself; the truncation bound 'eps_max' was intended.
        npt.assert_array_less(ne, phy_params['eps_max'])
        npt.assert_array_less(0., ne)

    def test_create_physio_brf(self):
        phy_params = phy.PHY_PARAMS_FRISTON00
        dt = .5
        duration = 25.
        brf = phy.create_physio_brf(phy_params, response_dt=dt,
                                    response_duration=duration)
        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(brf.size) * dt
            plt.plot(t, brf)
            plt.title('BRF')
            plt.show()
        npt.assert_approx_equal(brf[0], 0., significant=4)
        npt.assert_array_almost_equal(brf[-1], 0., decimal=4)
        npt.assert_approx_equal(np.argmax(brf)*dt, 3.5, significant=5)

    def test_create_physio_prf(self):
        phy_params = phy.PHY_PARAMS_FRISTON00
        dt = .5
        duration = 25.
        prf = phy.create_physio_prf(phy_params, response_dt=dt,
                                    response_duration=duration)
        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(prf.size) * dt
            plt.plot(t, prf)
            plt.title('PRF')
            plt.show()
        npt.assert_approx_equal(prf[0], 0., significant=4)
        npt.assert_array_almost_equal(prf[-1], 0., decimal=4)
        npt.assert_approx_equal(np.argmax(prf)*dt, 2.5, significant=5)

    def test_create_evoked_physio_signal(self):
        import pyhrf.paradigm
        phy_params = phy.PHY_PARAMS_FRISTON00
        tr = 1.
        duration = 20.
        ne = np.array([[10., 5.]])
        nb_conds, nb_vox = ne.shape
        # one single stimulation at the begining
        paradigm = pyhrf.paradigm.Paradigm({'c':[np.array([0.])]}, [duration],
                                           {'c':[np.array([1.])]})
        s, f, hbr, cbv = phy.create_evoked_physio_signals(phy_params, paradigm,
                                                          ne, tr)
        #shape of a signal: (nb_vox, nb_scans)
        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(f[0].size) * tr
            plt.plot(t, f[0])
            plt.title('inflow')
            plt.show()
        self.assertEqual(s.shape, (int(duration/tr), nb_vox))
        # check signal causality:
        self.assertEqual(f[0,0], 1.)
        npt.assert_approx_equal(f[-1,0], 1., significant=3)
        # non-regression test:
        self.assertEqual(np.argmax(f[:,0])*tr, 2)

    def test_phy_integrate_euler(self):
        phy_params = phy.PHY_PARAMS_FRISTON00
        tstep = .05
        nb_steps = 400
        stim_duration = int(1/tstep)
        stim = np.array([1.]*stim_duration + [0.]*(nb_steps-stim_duration))
        epsilon = .5
        s,f,q,v = phy.phy_integrate_euler(phy_params, tstep, stim, epsilon)
        # signal must be causal:
        self.assertEqual(f[0], 1.)
        npt.assert_approx_equal(f[-1], 1., significant=3)
        # non-regression checks:
        npt.assert_approx_equal(np.argmax(f)*tstep, 2.3)
        npt.assert_approx_equal(f.max(), 1.384, significant=4)
        if 0:
            import matplotlib.pyplot as plt
            t = np.arange(nb_steps) * tstep
            plt.plot(t,f)
            plt.title('inflow')
            plt.show()

    ####
    def test_finite_dif_matrix(self):
        phy.buildOrder1FiniteDiffMatrix(10)
        return None
|
philouc/pyhrf
|
python/pyhrf/test/test_sandbox_physio.py
|
Python
|
gpl-3.0
| 7,291
|
[
"Gaussian"
] |
35d57f131cca12a792e51ccd44934cacfe554e14dc95e7169f47898dd5b06593
|
# $Id: nodes.py 6351 2010-07-03 14:19:09Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import re
import sys
import types
import unicodedata
import warnings
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):

    """Abstract base class of nodes in a document tree."""

    parent = None
    """Back-reference to the Node immediately containing this Node."""

    document = None
    """The `document` node at the root of the tree containing this Node."""

    source = None
    """Path or description of the input source which generated this Node."""

    line = None
    """The line number (1-based) of the beginning of this Node in `source`."""

    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty.  A node is more
        than a simple container.  Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.

        Use `len()` to check node length.  Use `None` to represent a boolean
        false value.
        """
        return True

    if sys.version_info < (3,):
        # on 2.x, str(node) will be a byte string with Unicode
        # characters > 255 escaped; on 3.x this is no longer necessary
        def __str__(self):
            return unicode(self).encode('raw_unicode_escape')

    def asdom(self, dom=None):
        """Return a DOM **fragment** representation of this Node."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        return self._dom_node(domroot)

    def pformat(self, indent='    ', level=0):
        """
        Return an indented pseudo-XML representation, for test purposes.

        Override in subclasses.
        """
        raise NotImplementedError

    def copy(self):
        """Return a copy of self."""
        raise NotImplementedError

    def deepcopy(self):
        """Return a deep copy of self (also copying children)."""
        raise NotImplementedError

    def setup_child(self, child):
        # Propagate document context (document, source, line) down to a
        # newly attached child so it can report errors against the input.
        child.parent = self
        if self.document:
            child.document = self.document
            if child.source is None:
                child.source = self.document.current_source
            if child.line is None:
                child.line = self.document.current_line

    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling the
        `dispatch_visit()` method of `visitor` when entering each
        node.  (The `walkabout()` method is similar, except it also
        calls the `dispatch_departure()` method before exiting each
        node.)

        This tree traversal supports limited in-place tree
        modifications.  Replacing one node with one or more nodes is
        OK, as is removing an element.  However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.

        Within ``visit`` methods (and ``depart`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` implementation for each `Node` subclass encountered.

        Return true if we should stop the traversal.
        """
        # `stop` becomes 1 when StopTraversal is raised anywhere below;
        # it propagates up through the recursive child.walk() calls.
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walk calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except (SkipChildren, SkipNode):
                return stop
            except SkipDeparture:           # not applicable; ignore
                pass
            children = self.children
            try:
                # Iterate over a copy so visitors may remove/replace the
                # current child without breaking iteration.
                for child in children[:]:
                    if child.walk(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except StopTraversal:
            stop = 1
        return stop

    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which
        see), except also call the `dispatch_departure()` method
        before exiting each node.

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` and ``depart`` implementation for each `Node`
        subclass encountered.

        Return true if we should stop the traversal.
        """
        call_depart = 1
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except SkipNode:
                return stop
            except SkipDeparture:
                # Visit succeeded but the visitor asked to skip the
                # matching departure call.
                call_depart = 0
            children = self.children
            try:
                for child in children[:]:
                    if child.walkabout(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        except StopTraversal:
            stop = 1
        if call_depart:
            visitor.document.reporter.debug(
                'docutils.nodes.Node.walkabout calling dispatch_departure '
                'for %s' % self.__class__.__name__)
            visitor.dispatch_departure(self)
        return stop

    def _fast_traverse(self, cls):
        """Specialized traverse() that only supports instance checks."""
        result = []
        if isinstance(self, cls):
            result.append(self)
        for child in self.children:
            result.extend(child._fast_traverse(cls))
        return result

    def _all_traverse(self):
        """Specialized traverse() that doesn't check for a condition."""
        result = []
        result.append(self)
        for child in self.children:
            result.extend(child._all_traverse())
        return result

    def traverse(self, condition=None,
                 include_self=1, descend=1, siblings=0, ascend=0):
        """
        Return an iterable containing

        * self (if include_self is true)
        * all descendants in tree traversal order (if descend is true)
        * all siblings (if siblings is true) and their descendants (if
          also descend is true)
        * the siblings of the parent (if ascend is true) and their
          descendants (if also descend is true), and so on

        If `condition` is not None, the iterable contains only nodes
        for which ``condition(node)`` is true.  If `condition` is a
        node class ``cls``, it is equivalent to a function consisting
        of ``return isinstance(node, cls)``.

        If ascend is true, assume siblings to be true as well.

        For example, given the following tree::

            <paragraph>
                <emphasis>      <--- emphasis.traverse() and
                    <strong>    <--- strong.traverse() are called.
                        Foo
                    Bar
                <reference name="Baz" refid="baz">
                    Baz

        Then list(emphasis.traverse()) equals ::

            [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]

        and list(strong.traverse(ascend=1)) equals ::

            [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
        """
        if ascend:
            siblings=1
        # Check for special argument combinations that allow using an
        # optimized version of traverse()
        if include_self and descend and not siblings:
            if condition is None:
                return self._all_traverse()
            elif isinstance(condition, (types.ClassType, type)):
                return self._fast_traverse(condition)
        # Check if `condition` is a class (check for TypeType for Python
        # implementations that use only new-style classes, like PyPy).
        # NOTE: ``types.ClassType`` covers old-style classes on Python 2.
        if isinstance(condition, (types.ClassType, type)):
            node_class = condition
            def condition(node, node_class=node_class):
                return isinstance(node, node_class)
        r = []
        if include_self and (condition is None or condition(self)):
            r.append(self)
        if descend and len(self.children):
            for child in self:
                r.extend(child.traverse(
                    include_self=1, descend=1, siblings=0, ascend=0,
                    condition=condition))
        if siblings or ascend:
            node = self
            while node.parent:
                index = node.parent.index(node)
                for sibling in node.parent[index+1:]:
                    r.extend(sibling.traverse(include_self=1, descend=descend,
                                              siblings=0, ascend=0,
                                              condition=condition))
                if not ascend:
                    break
                else:
                    node = node.parent
        return r

    def next_node(self, condition=None,
                  include_self=0, descend=1, siblings=0, ascend=0):
        """
        Return the first node in the iterable returned by traverse(),
        or None if the iterable is empty.

        Parameter list is the same as of traverse.  Note that
        include_self defaults to 0, though.
        """
        iterable = self.traverse(condition=condition,
                                 include_self=include_self, descend=descend,
                                 siblings=siblings, ascend=ascend)
        try:
            return iterable[0]
        except IndexError:
            return None
# Python 2/3 text-type shim: text nodes subclass `reprunicode` so their
# reprs look identical on both interpreter lines.
if sys.version_info < (3,):
    class reprunicode(unicode):
        """
        A class that removes the initial u from unicode's repr.
        """
        def __repr__(self):
            # Drop the leading 'u' of u'...' so Python 2 reprs match
            # the Python 3 form.
            return unicode.__repr__(self)[1:]
else:
    # NOTE(review): under a stock Python 3 the name `unicode` does not
    # exist; this branch assumes a 2to3-style conversion renames it to
    # `str` -- confirm before running this file on 3.x directly.
    reprunicode = unicode
class Text(Node, reprunicode):
    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes. Initialize by passing a string to the constructor.
    Access the text itself with the `astext` method.
    """
    # Generic identifier; all text nodes share the '#text' tag.
    tagname = '#text'
    children = ()
    """Text nodes have no children, and cannot have children."""
    if sys.version_info > (3,):
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            if isinstance(data, bytes):
                raise TypeError('expecting str data, not bytes')
            return reprunicode.__new__(cls, data)
    else:
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            return reprunicode.__new__(cls, data)
    def __init__(self, data, rawsource=''):
        # `data` was already consumed by __new__ (string subclasses are
        # immutable); only the rawsource bookkeeping happens here.
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""
    def shortrepr(self, maxlen=18):
        # Truncate long text so reprs stay readable.
        data = self
        if len(data) > maxlen:
            data = data[:maxlen-4] + ' ...'
        return '<%s: %s>' % (self.tagname, repr(reprunicode(data)))
    def __repr__(self):
        return self.shortrepr(maxlen=68)
    def _dom_node(self, domroot):
        # DOM counterpart of a text leaf is a plain text node.
        return domroot.createTextNode(unicode(self))
    def astext(self):
        return reprunicode(self)
    # Note about __unicode__: The implementation of __unicode__ here,
    # and the one raising NotImplemented in the superclass Node had
    # to be removed when changing Text to a subclass of unicode instead
    # of UserString, since there is no way to delegate the __unicode__
    # call to the superclass unicode:
    # unicode itself does not have __unicode__ method to delegate to
    # and calling unicode(self) or unicode.__new__ directly creates
    # an infinite loop
    def copy(self):
        return self.__class__(reprunicode(self), rawsource=self.rawsource)
    def deepcopy(self):
        # Text nodes are immutable leaves, so deep and shallow copies
        # are identical.
        return self.copy()
    def pformat(self, indent=' ', level=0):
        # Pretty-print: re-indent each physical line to `level`.
        result = []
        indent = indent * level
        for line in self.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)
    # rstrip and lstrip are used by substitution definitions where
    # they are expected to return a Text instance, this was formerly
    # taken care of by UserString. Note that then and now the
    # rawsource member is lost.
    def rstrip(self, chars=None):
        return self.__class__(reprunicode.rstrip(self, chars))
    def lstrip(self, chars=None):
        return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):
"""
`Element` is the superclass to all specific elements.
Elements contain attributes and child nodes. Elements emulate
dictionaries for attributes, indexing by attribute name (a string). To
set the attribute 'att' to 'value', do::
element['att'] = 'value'
There are two special attributes: 'ids' and 'names'. Both are
lists of unique identifiers, and names serve as human interfaces
to IDs. Names are case- and whitespace-normalized (see the
fully_normalize_name() function), and IDs conform to the regular
expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
element[0]
Elements may be constructed using the ``+=`` operator. To add one new
child node to element, do::
element += node
This is equivalent to ``element.append(node)``.
To add a list of multiple child nodes at once, use the same ``+=``
operator::
element += [node1, node2]
This is equivalent to ``element.extend([node1, node2])``.
"""
list_attributes = ('ids', 'classes', 'names', 'dupnames', 'backrefs')
"""List attributes, automatically initialized to empty lists for
all nodes."""
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
child_text_separator = '\n\n'
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', *children, **attributes):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
self.children = []
"""List of child nodes (elements and/or `Text`)."""
self.extend(children) # maintain parent info
self.attributes = {}
"""Dictionary of attribute {name: value}."""
# Initialize list attributes.
for att in self.list_attributes:
self.attributes[att] = []
for att, value in attributes.items():
att = att.lower()
if att in self.list_attributes:
# mutable list; make a copy for this node
self.attributes[att] = value[:]
else:
self.attributes[att] = value
if self.tagname is None:
self.tagname = self.__class__.__name__
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attlist():
if isinstance(value, list):
value = ' '.join([serial_escape('%s' % v) for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join(self['names']), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
'; '.join(self['names']))
else:
return '<%s...>' % self.tagname
def __unicode__(self):
if self.children:
return u'%s%s%s' % (self.starttag(),
''.join([unicode(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
if sys.version_info > (3,):
# 2to3 doesn't convert __unicode__ to __str__
__str__ = __unicode__
def starttag(self):
parts = [self.tagname]
for name, value in self.attlist():
if value is None: # boolean attribute
parts.append(name)
elif isinstance(value, list):
values = [serial_escape('%s' % v) for v in value]
parts.append('%s="%s"' % (name, ' '.join(values)))
else:
parts.append('%s="%s"' % (name, value))
return '<%s>' % ' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return u'<%s/>' % ' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
def __contains__(self, key):
# support both membership test for children and attributes
# (has_key is translated to "in" by 2to3)
if isinstance(key, basestring):
return key in self.attributes
return key in self.children
def __getitem__(self, key):
if isinstance(key, basestring):
return self.attributes[key]
elif isinstance(key, int):
return self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
return self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __setitem__(self, key, item):
if isinstance(key, basestring):
self.attributes[str(key)] = item
elif isinstance(key, int):
self.setup_child(item)
self.children[key] = item
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
for node in item:
self.setup_child(node)
self.children[key.start:key.stop] = item
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __delitem__(self, key):
if isinstance(key, basestring):
del self.attributes[key]
elif isinstance(key, int):
del self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
del self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a simple '
'slice, or an attribute name string')
def __add__(self, other):
return self.children + other
def __radd__(self, other):
return other + self.children
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
self.append(other)
elif other is not None:
self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
for key, value in self.attributes.items():
if self.is_not_default(key):
atts[key] = value
return atts
def attlist(self):
attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
def get(self, key, failobj=None):
return self.attributes.get(key, failobj)
def hasattr(self, attr):
return attr in self.attributes
def delattr(self, attr):
if attr in self.attributes:
del self.attributes[attr]
def setdefault(self, key, failobj=None):
return self.attributes.setdefault(key, failobj)
has_key = hasattr
# support operator in
__contains__ = hasattr
def append(self, item):
self.setup_child(item)
self.children.append(item)
def extend(self, item):
for node in item:
self.append(node)
def insert(self, index, item):
if isinstance(item, Node):
self.setup_child(item)
self.children.insert(index, item)
elif item is not None:
self[index:index] = item
def pop(self, i=-1):
return self.children.pop(i)
def remove(self, item):
self.children.remove(item)
def index(self, item):
return self.children.index(item)
def is_not_default(self, key):
if self[key] == [] and key in self.list_attributes:
return 0
else:
return 1
def update_basic_atts(self, dict):
"""
Update basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') from node or dictionary `dict`.
"""
if isinstance(dict, Node):
dict = dict.attributes
for att in ('ids', 'classes', 'names', 'dupnames'):
for value in dict.get(att, []):
if not value in self[att]:
self[att].append(value)
def clear(self):
self.children = []
def replace(self, old, new):
"""Replace one child `Node` with another child or children."""
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new
def replace_self(self, new):
"""
Replace `self` node with `new`, where `new` is a node or a
list of nodes.
"""
update = new
if not isinstance(new, Node):
# `new` is a list; update first child.
try:
update = new[0]
except IndexError:
update = None
if isinstance(update, Element):
update.update_basic_atts(self)
else:
# `update` is a Text node or `new` is an empty list.
# Assert that we aren't losing any attributes.
for att in ('ids', 'names', 'classes', 'dupnames'):
assert not self[att], \
'Losing "%s" attribute: %s' % (att, self[att])
self.parent.replace(self, new)
def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
"""
Return the index of the first child whose class exactly matches.
Parameters:
- `childclass`: A `Node` subclass to search for, or a tuple of `Node`
classes. If a tuple, any of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
return index
return None
def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxint):
"""
Return the index of the first child whose class does *not* match.
Parameters:
- `childclass`: A `Node` subclass to skip, or a tuple of `Node`
classes. If a tuple, none of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self.children[index], c):
break
else:
return index
return None
def pformat(self, indent=' ', level=0):
return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
[child.pformat(indent, level+1)
for child in self.children])
def copy(self):
return self.__class__(rawsource=self.rawsource, **self.attributes)
def deepcopy(self):
copy = self.copy()
copy.extend([child.deepcopy() for child in self.children])
return copy
def set_class(self, name):
"""Add a new class to the "classes" attribute."""
warnings.warn('docutils.nodes.Element.set_class deprecated; '
"append to Element['classes'] list attribute directly",
DeprecationWarning, stacklevel=2)
assert ' ' not in name
self['classes'].append(name.lower())
def note_referenced_by(self, name=None, id=None):
"""Note that this Element has been referenced by its name
`name` or id `id`."""
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target propagation.
by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
if by_name:
assert name is not None
by_name.referenced = 1
if by_id:
assert id is not None
by_id.referenced = 1
class TextElement(Element):
    """
    An element which directly contains text; its children are all
    `Text` or `Inline` subclass nodes.
    A node's context is inline exactly when its immediate parent is a
    `TextElement` (or subclass) instance; this matters for nodes such
    as `image` that can occur both inline and as standalone body
    elements.
    When passing children to `__init__()`, set `text` to ``''`` or some
    other suitable value.
    """
    child_text_separator = ''
    """Separator for child nodes, used by `astext()` method."""
    def __init__(self, rawsource='', text='', *children, **attributes):
        # A non-empty `text` becomes the first child, wrapped in a
        # `Text` node; an empty string adds no child at all.
        if text != '':
            children = (Text(text),) + children
        Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):
    """An element which directly contains preformatted text."""
    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        # 'xml:space="preserve"' marks whitespace as significant for
        # serializers/writers.
        self.attributes['xml:space'] = 'preserve'
# ========
#  Mixins
# ========
class Resolvable:
    """Mixin for nodes whose references may still need resolution."""
    resolved = 0
class BackLinkable:
    """Mixin adding back-link bookkeeping via the 'backrefs' attribute."""
    def add_backref(self, refid):
        """Record `refid` as a back-reference to this node."""
        self['backrefs'].append(refid)
# ====================
#  Element Categories
# ====================
class Root:
    """Category of the document root element."""
class Titular:
    """Category of title-like elements."""
class PreBibliographic:
    """Category of Node which may occur before Bibliographic Nodes."""
class Bibliographic:
    """Category of bibliographic elements."""
class Decorative(PreBibliographic):
    """Category of decorative elements (e.g. header, footer)."""
class Structural:
    """Category of structural elements."""
class Body:
    """Category of body elements."""
class General(Body):
    """General body elements."""
class Sequential(Body):
    """List-like elements."""
class Admonition(Body):
    """Admonition body elements."""
class Special(Body):
    """Special internal body elements."""
class Invisible(PreBibliographic):
    """Internal elements that don't appear in output."""
class Part:
    """Category of element parts (components of larger elements)."""
class Inline:
    """Category of inline elements."""
class Referential(Resolvable):
    """Category of elements that refer to other nodes."""
class Targetable(Resolvable):
    """Category of elements that can be link targets."""
    referenced = 0
    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target.
    Required for MoinMoin/reST compatibility."""
class Labeled:
    """Contains a `label` as its first element."""
# ==============
# Root Element
# ==============
class document(Root, Structural, Element):
    """
    The document root element.
    Do not instantiate this class directly; use
    `docutils.utils.new_document()` instead.
    """
    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)
        self.current_source = None
        """Path to or description of the input source being processed."""
        self.current_line = None
        """Line number (1-based) of `current_source`."""
        self.settings = settings
        """Runtime settings data record."""
        self.reporter = reporter
        """System message generator."""
        self.indirect_targets = []
        """List of indirect target nodes."""
        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""
        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""
        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""
        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""
        self.nameids = {}
        """Mapping of names to unique id's."""
        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit)."""
        self.ids = {}
        """Mapping of ids to nodes."""
        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""
        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""
        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""
        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""
        self.symbol_footnotes = []
        """List of symbol footnote nodes."""
        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""
        self.footnotes = []
        """List of manually-numbered footnote nodes."""
        self.citations = []
        """List of citation nodes."""
        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""
        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""
        self.id_start = 1
        """Initial ID number."""
        self.parse_messages = []
        """System messages generated while parsing."""
        self.transform_messages = []
        """System messages generated while applying transforms."""
        # Imported here (not at module level) to avoid a circular import
        # between docutils.nodes and docutils.transforms.
        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""
        self.decoration = None
        """Document's `decoration` node."""
        self.document = self
    def __getstate__(self):
        """
        Return dict with unpicklable references removed.
        """
        state = self.__dict__.copy()
        state['reporter'] = None
        state['transformer'] = None
        return state
    def asdom(self, dom=None):
        """Return a DOM representation of this document."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        domroot.appendChild(self._dom_node(domroot))
        return domroot
    def set_id(self, node, msgnode=None):
        # Register `node` in self.ids, deriving an id from its names or
        # generating an auto-numbered one; return the id used.
        for id in node['ids']:
            # Already-assigned ids must be unique document-wide.
            if id in self.ids and self.ids[id] is not node:
                msg = self.reporter.severe('Duplicate ID: "%s".' % id)
                if msgnode != None:
                    msgnode += msg
        if not node['ids']:
            # Prefer an id derived from one of the node's names ...
            for name in node['names']:
                id = self.settings.id_prefix + make_id(name)
                if id and id not in self.ids:
                    break
            else:
                # ... otherwise fall back to an auto-numbered id.
                id = ''
                while not id or id in self.ids:
                    id = (self.settings.id_prefix +
                          self.settings.auto_id_prefix + str(self.id_start))
                    self.id_start += 1
            node['ids'].append(id)
        self.ids[id] = node
        return id
    def set_name_id_map(self, node, id, msgnode=None, explicit=None):
        """
        `self.nameids` maps names to IDs, while `self.nametypes` maps names to
        booleans representing hyperlink type (True==explicit,
        False==implicit). This method updates the mappings.
        The following state transition table shows how `self.nameids` ("ids")
        and `self.nametypes` ("types") change with new input (a call to this
        method), and what actions are performed ("implicit"-type system
        messages are INFO/1, and "explicit"-type system messages are ERROR/3):
        ====  =====  ========  ========  =======  ====  =====  =====
         Old State    Input          Action        New State   Notes
        -----------  --------  -----------------  -----------  -----
        ids   types  new type  sys.msg.  dupname  ids   types
        ====  =====  ========  ========  =======  ====  =====  =====
        -     -      explicit  -         -        new   True
        -     -      implicit  -         -        new   False
        None  False  explicit  -         -        new   True
        old   False  explicit  implicit  old      new   True
        None  True   explicit  explicit  new      None  True
        old   True   explicit  explicit  new,old  None  True   [#]_
        None  False  implicit  implicit  new      None  False
        old   False  implicit  implicit  new,old  None  False
        None  True   implicit  implicit  new      None  True
        old   True   implicit  implicit  new      old   True
        ====  =====  ========  ========  =======  ====  =====  =====
        .. [#] Do not clear the name-to-id map or invalidate the old target if
           both old and new targets are external and refer to identical URIs.
           The new target is invalidated regardless.
        """
        for name in node['names']:
            if name in self.nameids:
                self.set_duplicate_name_id(node, id, name, msgnode, explicit)
            else:
                self.nameids[name] = id
                self.nametypes[name] = explicit
    def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
        # Resolve a name collision per the state table in
        # set_name_id_map's docstring.
        old_id = self.nameids[name]
        old_explicit = self.nametypes[name]
        self.nametypes[name] = old_explicit or explicit
        if explicit:
            if old_explicit:
                level = 2
                if old_id is not None:
                    old_node = self.ids[old_id]
                    if 'refuri' in node:
                        refuri = node['refuri']
                        if old_node['names'] \
                               and 'refuri' in old_node \
                               and old_node['refuri'] == refuri:
                            level = 1   # just inform if refuri's identical
                    if level > 1:
                        dupname(old_node, name)
                        self.nameids[name] = None
                msg = self.reporter.system_message(
                    level, 'Duplicate explicit target name: "%s".' % name,
                    backrefs=[id], base_node=node)
                if msgnode != None:
                    msgnode += msg
                dupname(node, name)
            else:
                # New explicit target wins over an old implicit one.
                self.nameids[name] = id
                if old_id is not None:
                    old_node = self.ids[old_id]
                    dupname(old_node, name)
        else:
            if old_id is not None and not old_explicit:
                # Two implicit targets: both are invalidated.
                self.nameids[name] = None
                old_node = self.ids[old_id]
                dupname(old_node, name)
            dupname(node, name)
        if not explicit or (not old_explicit and old_id is not None):
            msg = self.reporter.info(
                'Duplicate implicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            if msgnode != None:
                msgnode += msg
    def has_name(self, name):
        return name in self.nameids
    # "note" here is an imperative verb: "take note of".
    def note_implicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=None)
    def note_explicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=1)
    def note_refname(self, node):
        self.refnames.setdefault(node['refname'], []).append(node)
    def note_refid(self, node):
        self.refids.setdefault(node['refid'], []).append(node)
    def note_indirect_target(self, target):
        self.indirect_targets.append(target)
        if target['names']:
            self.note_refname(target)
    def note_anonymous_target(self, target):
        self.set_id(target)
    def note_autofootnote(self, footnote):
        self.set_id(footnote)
        self.autofootnotes.append(footnote)
    def note_autofootnote_ref(self, ref):
        self.set_id(ref)
        self.autofootnote_refs.append(ref)
    def note_symbol_footnote(self, footnote):
        self.set_id(footnote)
        self.symbol_footnotes.append(footnote)
    def note_symbol_footnote_ref(self, ref):
        self.set_id(ref)
        self.symbol_footnote_refs.append(ref)
    def note_footnote(self, footnote):
        self.set_id(footnote)
        self.footnotes.append(footnote)
    def note_footnote_ref(self, ref):
        self.set_id(ref)
        self.footnote_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)
    def note_citation(self, citation):
        self.citations.append(citation)
    def note_citation_ref(self, ref):
        self.set_id(ref)
        self.citation_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)
    def note_substitution_def(self, subdef, def_name, msgnode=None):
        name = whitespace_normalize_name(def_name)
        if name in self.substitution_defs:
            msg = self.reporter.error(
                'Duplicate substitution definition name: "%s".' % name,
                base_node=subdef)
            if msgnode != None:
                msgnode += msg
            oldnode = self.substitution_defs[name]
            dupname(oldnode, name)
        # keep only the last definition:
        self.substitution_defs[name] = subdef
        # case-insensitive mapping:
        self.substitution_names[fully_normalize_name(name)] = name
    def note_substitution_ref(self, subref, refname):
        subref['refname'] = whitespace_normalize_name(refname)
    def note_pending(self, pending, priority=None):
        self.transformer.add_pending(pending, priority)
    def note_parse_message(self, message):
        self.parse_messages.append(message)
    def note_transform_message(self, message):
        self.transform_messages.append(message)
    def note_source(self, source, offset):
        self.current_source = source
        if offset is None:
            self.current_line = offset
        else:
            # `offset` is 0-based; current_line is 1-based.
            self.current_line = offset + 1
    def copy(self):
        # Shallow copy: settings/reporter/attributes only, no children.
        return self.__class__(self.settings, self.reporter,
                              **self.attributes)
    def get_decoration(self):
        # Lazily create the `decoration` node, placed after any titular
        # elements at the start of the document.
        if not self.decoration:
            self.decoration = decoration()
            index = self.first_child_not_matching_class(Titular)
            if index is None:
                self.append(self.decoration)
            else:
                self.insert(index, self.decoration)
        return self.decoration
# ================
#  Title Elements
# ================
class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
#  Bibliographic Elements
# ========================
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
# `address` keeps line breaks, hence FixedTextElement (xml:space).
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
#  Decorative Elements
# =====================
class decoration(Decorative, Element):
    """Container for the document's `header` and `footer` nodes."""
    def get_header(self):
        """Return the header child, first creating an empty `header`
        node if one is not already in place."""
        kids = self.children
        if not (kids and isinstance(kids[0], header)):
            self.insert(0, header())
        return self.children[0]
    def get_footer(self):
        """Return the footer child, first creating an empty `footer`
        node if one is not already in place."""
        kids = self.children
        if not (kids and isinstance(kids[-1], footer)):
            self.append(footer())
        return self.children[-1]
class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
#  Structural Elements
# =====================
class section(Structural, Element): pass
class topic(Structural, Element):
    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures. A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.
    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document. Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """
class sidebar(Structural, Element):
    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material. A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it. Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.
    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document. Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """
# NOTE(review): `transition` carries no content of its own here
# (Structural marker only); see writers for its rendering.
class transition(Structural, Element): pass
# ===============
#  Body Elements
# ===============
class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass
class option(Part, Element):
    # Option name and argument are joined without separator in astext().
    child_text_separator = ''
class option_argument(Part, TextElement):
    def astext(self):
        # Prefix the argument text with its delimiter (default: space).
        return self.get('delimiter', ' ') + TextElement.astext(self)
class option_group(Part, Element):
    # Multiple option synonyms are comma-separated in astext().
    child_text_separator = ', '
class option_list(Sequential, Element): pass
class option_list_item(Part, Element):
    child_text_separator = '  '
class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass
class line(Part, TextElement):
    # NOTE(review): `indent` appears to be assigned externally by the
    # parser/writer; default is None -- confirm before relying on it.
    indent = None
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):
    """
    System message element.
    Do not instantiate this class directly; use
    ``document.reporter.info/warning/error/severe()`` instead.
    """
    def __init__(self, message=None, *children, **attributes):
        # A `message` string becomes the first child, as a paragraph.
        if message:
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            # Debugging aid only: the bare except reports the offending
            # children and then re-raises the original exception.
            print 'system_message: children=%r' % (children,)
            raise
    def astext(self):
        # Format: "<source>:<line>: (<type>/<level>) <message text>".
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))
class pending(Special, Invisible, Element):
"""
The "pending" element is used to encapsulate a pending operation: the
operation (transform), the point at which to apply it, and any data it
requires. Only the pending operation's location within the document is
stored in the public document tree (by the "pending" object itself); the
operation and its data are stored in the "pending" object's internal
instance attributes.
For example, say you want a table of contents in your reStructuredText
document. The easiest way to specify where to put it is from within the
document, with a directive::
.. contents::
But the "contents" directive can't do its work until the entire document
has been parsed and possibly transformed to some extent. So the directive
code leaves a placeholder behind that will trigger the second phase of its
processing, something like this::
<pending ...public attributes...> + internal attributes
Use `document.note_pending()` so that the
`docutils.transforms.Transformer` stage of processing can run all pending
transforms.
"""
def __init__(self, transform, details=None,
rawsource='', *children, **attributes):
Element.__init__(self, rawsource, *children, **attributes)
self.transform = transform
"""The `docutils.transforms.Transform` class implementing the pending
operation."""
self.details = details or {}
"""Detail data (dictionary) required by the pending operation."""
def pformat(self, indent=' ', level=0):
internals = [
'.. internal attributes:',
' .transform: %s.%s' % (self.transform.__module__,
self.transform.__name__),
' .details:']
details = self.details.items()
details.sort()
for key, value in details:
if isinstance(value, Node):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
elif value and isinstance(value, list) \
and isinstance(value[0], Node):
internals.append('%7s%s:' % ('', key))
for v in value:
internals.extend(['%9s%s' % ('', line)
for line in v.pformat().splitlines()])
else:
internals.append('%7s%s: %r' % ('', key, value))
return (Element.pformat(self, indent, level)
+ ''.join([(' %s%s\n' % (indent * level, line))
for line in internals]))
    def copy(self):
        # Shallow copy: the same transform class and the same details dict
        # are reused; only rawsource and attributes are passed through
        # (children are not copied).
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    # NOTE(review): inherits FixedTextElement, so the content is presumably
    # stored verbatim (whitespace preserved) -- confirm against
    # FixedTextElement's definition earlier in this module.
    pass
# =================
# Inline Elements
# =================
# Simple inline elements.  Each carries no behaviour of its own: it is a
# TextElement distinguished only by its class name, which determines how
# writers render it.
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class image(General, Inline, Element):

    """Image element; its "alt" attribute doubles as its textual content."""

    def astext(self):
        # An image has no text of its own; fall back to the empty string
        # when no "alt" attribute has been set.
        fallback = ''
        return self.get('alt', fallback)
# Further simple inline elements, again distinguished only by class name.
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:

    """
    Abstract superclass implementing the "Visitor" pattern [GoF95]_ for
    document tree traversals.

    Every concrete node class has a matching pair of handler methods that
    do nothing by default; subclasses override the ones they need.
    `Node.walk()` calls `dispatch_visit()` when a node is entered, and
    `Node.walkabout()` additionally calls `dispatch_departure()` before a
    node is left.  The dispatchers resolve to "``visit_`` + node class
    name" and "``depart_`` + node class name", respectively.

    Subclass `NodeVisitor` directly when ``visit_...`` & ``depart_...``
    methods must exist for *all* node types encountered (as for
    `docutils.writers.Writer` subclasses); unimplemented handlers then
    raise exceptions.  For sparse traversals interested in only a few node
    types, subclass `SparseNodeVisitor` instead; for (mostly) uniform
    processing, subclass `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    optional = ()
    """
    Tuple containing node class names (as strings).

    No exception will be raised if writers do not implement visit
    or departure functions for these node classes.

    Used to ensure transitional compatibility with existing 3rd-party writers.
    """

    def __init__(self, document):
        self.document = document

    def dispatch_visit(self, node):
        """
        Invoke self."``visit_`` + node class name", passing `node`.  Falls
        back to `self.unknown_visit` when no such method exists.
        """
        klass_name = node.__class__.__name__
        handler = getattr(self, 'visit_' + klass_name, self.unknown_visit)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
            % (handler.__name__, klass_name))
        return handler(node)

    def dispatch_departure(self, node):
        """
        Invoke self."``depart_`` + node class name", passing `node`.  Falls
        back to `self.unknown_departure` when no such method exists.
        """
        klass_name = node.__class__.__name__
        handler = getattr(self, 'depart_' + klass_name,
                          self.unknown_departure)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
            % (handler.__name__, klass_name))
        return handler(node)

    def unknown_visit(self, node):
        """
        Called when entering a `Node` type with no ``visit_...`` method.

        Raises NotImplementedError unless strict visiting is disabled and
        the node type is listed in `self.optional`.
        """
        klass_name = node.__class__.__name__
        if (self.document.settings.strict_visitor
            or klass_name not in self.optional):
            raise NotImplementedError(
                '%s visiting unknown node type: %s'
                % (self.__class__, klass_name))

    def unknown_departure(self, node):
        """
        Called before exiting a `Node` type with no ``depart_...`` method.

        Raises NotImplementedError unless strict visiting is disabled and
        the node type is listed in `self.optional`.
        """
        klass_name = node.__class__.__name__
        if (self.document.settings.strict_visitor
            or klass_name not in self.optional):
            raise NotImplementedError(
                '%s departing unknown node type: %s'
                % (self.__class__, klass_name))
class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest.  When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """

    # No-op handlers for every concrete node class are installed on this
    # class dynamically by `_add_node_class_names` below.
class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only. Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """

    # Per-node-class ``visit_...``/``depart_...`` methods delegating to the
    # two methods below are installed dynamically by `_add_node_class_names`.

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError
def _call_default_visit(self, node):
    # Installed by `_add_node_class_names` as every ``visit_...`` method of
    # `GenericNodeVisitor`; simply delegates to the instance's default.
    self.default_visit(node)

def _call_default_departure(self, node):
    # Installed by `_add_node_class_names` as every ``depart_...`` method of
    # `GenericNodeVisitor`.
    self.default_departure(node)

def _nop(self, node):
    # Installed by `_add_node_class_names` as every handler of
    # `SparseNodeVisitor`: ignore the node.
    pass
def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    # For each concrete node class name, install default handlers:
    # GenericNodeVisitor delegates to default_visit/default_departure,
    # while SparseNodeVisitor ignores the node entirely.
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

# Populate both visitor base classes at import time.
_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):

    """
    Build a complete copy of a tree or branch, element attributes included.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        self.parent_stack = []  # parents saved on entry, one per level
        self.parent = []        # current copy target (a list at the root)

    def get_tree_copy(self):
        # The root copy is the single item appended to the initial list.
        return self.parent[0]

    def default_visit(self, node):
        """Copy `node`, attach it, and make the copy the acting parent."""
        duplicate = node.copy()
        self.parent.append(duplicate)
        self.parent_stack.append(self.parent)
        self.parent = duplicate

    def default_departure(self, node):
        """Restore the acting parent saved when `node` was entered."""
        self.parent = self.parent_stack.pop()
class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass


class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node.  The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass


class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node.  The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass


class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass


class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method.  The current node's
    children and siblings are not affected.
    """

    pass


class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found.  This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass


class StopTraversal(TreePruningException):

    """
    Stop the traversal altogether.  The current node's ``depart_...`` method
    is not affected.  The parent nodes' ``depart_...`` methods are also
    called as usual.  No other nodes are visited.  This is an alternative to
    NodeFound that does not cause exception handling to trickle up to the
    caller.
    """

    pass
def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers will conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``.  For CSS compatibility, identifiers (the "class"
    and "id" attributes) should have no underscores, colons, or periods.
    Hyphens may be used.

    - The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:

          ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
          followed by any number of letters, digits ([0-9]), hyphens ("-"),
          underscores ("_"), colons (":"), and periods (".").

    - However the `CSS1 spec`_ defines identifiers based on the "name" token,
      a tighter interpretation ("flex" tokenizer notation; "latin1" and
      "escape" 8-bit characters have been replaced with entities)::

          unicode     \\[0-9a-f]{1,4}
          latin1      [¡-ÿ]
          escape      {unicode}|\\[ -~¡-ÿ]
          nmchar      [-a-z0-9]|{latin1}|{escape}
          name        {nmchar}+

    The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
    or periods ("."), therefore "class" and "id" attributes should not contain
    these characters. They should be replaced with hyphens ("-"). Combined
    with HTML's requirements (the first character must be a letter; no
    "unicode", "latin1", or "escape" characters), this results in the
    ``[a-z](-?[a-z0-9]+)*`` pattern.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    id = string.lower()
    # `unicode` exists on Python 2 only; this is the py2-only copy of the
    # module.  Ensure we hold a unicode string before applying the
    # translation tables below (str.translate has different semantics).
    if not isinstance(id, unicode):
        id = id.decode()
    # Transliterate multi-letter digraphs first, then single characters.
    id = id.translate(_non_id_translate_digraphs)
    id = id.translate(_non_id_translate)
    # get rid of non-ascii characters.
    # 'ascii' lowercase to prevent problems with turkish locale.
    id = unicodedata.normalize('NFKD', id).\
         encode('ascii', 'ignore').decode('ascii')
    # shrink runs of whitespace and replace by hyphen
    id = _non_id_chars.sub('-', ' '.join(id.split()))
    # Strip leading digits/hyphens and trailing hyphens (ids must start
    # with a letter).
    id = _non_id_at_ends.sub('', id)
    return str(id)
# Helper patterns and translation tables for `make_id` above.
_non_id_chars = re.compile('[^a-z0-9]+')       # runs to collapse into hyphens
_non_id_at_ends = re.compile('^[-0-9]+|-+$')   # invalid leading/trailing runs

# Single-character transliterations (codepoint -> ASCII replacement) for
# stroked/hooked Latin letters not handled by NFKD decomposition.
_non_id_translate = {
    0x00f8: u'o',       # o with stroke
    0x0111: u'd',       # d with stroke
    0x0127: u'h',       # h with stroke
    0x0131: u'i',       # dotless i
    0x0142: u'l',       # l with stroke
    0x0167: u't',       # t with stroke
    0x0180: u'b',       # b with stroke
    0x0183: u'b',       # b with topbar
    0x0188: u'c',       # c with hook
    0x018c: u'd',       # d with topbar
    0x0192: u'f',       # f with hook
    0x0199: u'k',       # k with hook
    0x019a: u'l',       # l with bar
    0x019e: u'n',       # n with long right leg
    0x01a5: u'p',       # p with hook
    0x01ab: u't',       # t with palatal hook
    0x01ad: u't',       # t with hook
    0x01b4: u'y',       # y with hook
    0x01b6: u'z',       # z with stroke
    0x01e5: u'g',       # g with stroke
    0x0225: u'z',       # z with hook
    0x0234: u'l',       # l with curl
    0x0235: u'n',       # n with curl
    0x0236: u't',       # t with curl
    0x0237: u'j',       # dotless j
    0x023c: u'c',       # c with stroke
    0x023f: u's',       # s with swash tail
    0x0240: u'z',       # z with swash tail
    0x0247: u'e',       # e with stroke
    0x0249: u'j',       # j with stroke
    0x024b: u'q',       # q with hook tail
    0x024d: u'r',       # r with stroke
    0x024f: u'y',       # y with stroke
}
# Characters that transliterate to *two* ASCII letters; applied before the
# single-character table.
_non_id_translate_digraphs = {
    0x00df: u'sz',      # ligature sz
    0x00e6: u'ae',      # ae
    0x0153: u'oe',      # ligature oe
    0x0238: u'db',      # db digraph
    0x0239: u'qp',      # qp digraph
}
def dupname(node, name):
    """Record `name` as a duplicate on `node`, removing it from 'names'."""
    duplicates = node['dupnames']
    duplicates.append(name)
    node['names'].remove(name)
    # Treat the node as referenced even though it isn't, so that no
    # unnecessary system_messages are emitted on its behalf later.
    node.referenced = 1
def fully_normalize_name(name):
    """Return `name` lowercased, with whitespace runs collapsed to single
    spaces and leading/trailing whitespace removed."""
    tokens = name.lower().split()
    return ' '.join(tokens)
def whitespace_normalize_name(name):
    """Return `name` with whitespace runs collapsed to single spaces and
    the ends stripped; case is preserved."""
    tokens = name.split()
    return ' '.join(tokens)
def serial_escape(value):
    """Escape backslashes and spaces in `value` so it can be serialized as
    one element of a space-separated list."""
    escaped = value.replace('\\', '\\\\')
    return escaped.replace(' ', '\\ ')
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
|
MichaelNedzelsky/intellij-community
|
python/helpers/py2only/docutils/nodes.py
|
Python
|
apache-2.0
| 64,847
|
[
"VisIt"
] |
bd275c5bdc6b61c8bf6e0b744f251bbd93c8c3a5b86417d544fc6f24ad67ee21
|
from basecrm.configuration import Configuration
from basecrm.http_client import HttpClient
import basecrm.services
import basecrm.sync
class Client(object):
    """
    The :class:`Client <Client>` is the entry point to all services and actions.

    :attribute :class:`Configuration <basecrm.Configuration>` config: Current Base CRM client configuration.
    :attribute :class:`HttpClient <basecrm.HttpClient>` http_client: Http client.
    :copyright: (c) 2015 - 2021 by BaseCRM developers (developers@getbase.com).
    :license: MIT, see LICENSE for more details.
    """

    def __init__(self, **options):
        """
        Usage::

          >>> import os
          >>> import basecrm
          >>> client = basecrm.Client(access_token=os.environ.get('BASECRM_ACCESS_TOKEN'))
          <basecrm.Client>

        :param str access_token: Personal access token.
        :param str base_url: (optional) Base url for the api. Default: ``https://api.getbase.com``.
        :param str user_agent: (optional) Client user agent. Default: ``BaseCRM/v2 Python/{basecrm.VERSION}``.
        :param bool verbose: (optional) Verbose/debug mode. Default: ``False``.
        :param int timeout: (optional) Connection and response timeout. Default: **30** seconds.
        :param bool verify_ssl: (optional) Whether to verify ssl or not. Default: ``True``.

        :raises ConfigurationError: if no ``access_token`` provided.
        :raises ConfigurationError: if provided ``access_token`` is invalid - contains disallowed characters.
        :raises ConfigurationError: if provided ``access_token`` is invalid - has invalid length.
        :raises ConfigurationError: if provided ``base_url`` is invalid.
        """
        # Validate configuration up front so a misconfigured client fails
        # at construction time rather than on first request.
        self.config = Configuration(**options)
        self.config.validate()

        self.http_client = HttpClient(self.config)

        # One service object per Base CRM resource type, all sharing the
        # same HTTP client; each is exposed read-only via a property below.
        self.__accounts = basecrm.services.AccountsService(self.http_client)
        self.__associated_contacts = basecrm.services.AssociatedContactsService(self.http_client)
        self.__contacts = basecrm.services.ContactsService(self.http_client)
        self.__deals = basecrm.services.DealsService(self.http_client)
        self.__deal_sources = basecrm.services.DealSourcesService(self.http_client)
        self.__deal_unqualified_reasons = basecrm.services.DealUnqualifiedReasonsService(self.http_client)
        self.__leads = basecrm.services.LeadsService(self.http_client)
        self.__lead_sources = basecrm.services.LeadSourcesService(self.http_client)
        self.__lead_unqualified_reasons = basecrm.services.LeadUnqualifiedReasonsService(self.http_client)
        self.__line_items = basecrm.services.LineItemsService(self.http_client)
        self.__loss_reasons = basecrm.services.LossReasonsService(self.http_client)
        self.__notes = basecrm.services.NotesService(self.http_client)
        self.__orders = basecrm.services.OrdersService(self.http_client)
        self.__pipelines = basecrm.services.PipelinesService(self.http_client)
        self.__products = basecrm.services.ProductsService(self.http_client)
        self.__sources = basecrm.services.SourcesService(self.http_client)
        self.__stages = basecrm.services.StagesService(self.http_client)
        self.__tags = basecrm.services.TagsService(self.http_client)
        self.__tasks = basecrm.services.TasksService(self.http_client)
        self.__text_messages = basecrm.services.TextMessagesService(self.http_client)
        self.__users = basecrm.services.UsersService(self.http_client)
        self.__visits = basecrm.services.VisitsService(self.http_client)
        self.__visit_outcomes = basecrm.services.VisitOutcomesService(self.http_client)
        self.__sync = basecrm.sync.SyncService(self.http_client)

    @property
    def accounts(self):
        """
        :return: :class:`AccountsService <basecrm.AccountsService>` object that gives you an access to all Account related actions.
        :rtype: basecrm.AccountsService
        """
        return self.__accounts

    @property
    def associated_contacts(self):
        """
        :return: :class:`AssociatedContactsService <basecrm.AssociatedContactsService>` object that gives you an access to all AssociatedContact related actions.
        :rtype: basecrm.AssociatedContactsService
        """
        return self.__associated_contacts

    @property
    def contacts(self):
        """
        :return: :class:`ContactsService <basecrm.ContactsService>` object that gives you an access to all Contact related actions.
        :rtype: basecrm.ContactsService
        """
        return self.__contacts

    @property
    def deals(self):
        """
        :return: :class:`DealsService <basecrm.DealsService>` object that gives you an access to all Deal related actions.
        :rtype: basecrm.DealsService
        """
        return self.__deals

    @property
    def deal_sources(self):
        """
        :return: :class:`DealSourcesService <basecrm.DealSourcesService>` object that gives you an access to all DealSource related actions.
        :rtype: basecrm.DealSourcesService
        """
        return self.__deal_sources

    @property
    def deal_unqualified_reasons(self):
        """
        :return: :class:`DealUnqualifiedReasonsService <basecrm.DealUnqualifiedReasonsService>` object that gives you an access to all DealUnqualifiedReason related actions.
        :rtype: basecrm.DealUnqualifiedReasonsService
        """
        return self.__deal_unqualified_reasons

    @property
    def leads(self):
        """
        :return: :class:`LeadsService <basecrm.LeadsService>` object that gives you an access to all Lead related actions.
        :rtype: basecrm.LeadsService
        """
        return self.__leads

    @property
    def lead_sources(self):
        """
        :return: :class:`LeadSourcesService <basecrm.LeadSourcesService>` object that gives you an access to all LeadSource related actions.
        :rtype: basecrm.LeadSourcesService
        """
        return self.__lead_sources

    @property
    def lead_unqualified_reasons(self):
        """
        :return: :class:`LeadUnqualifiedReasonsService <basecrm.LeadUnqualifiedReasonsService>` object that gives you an access to all LeadUnqualifiedReason related actions.
        :rtype: basecrm.LeadUnqualifiedReasonsService
        """
        return self.__lead_unqualified_reasons

    @property
    def line_items(self):
        """
        :return: :class:`LineItemsService <basecrm.LineItemsService>` object that gives you an access to all LineItem related actions.
        :rtype: basecrm.LineItemsService
        """
        return self.__line_items

    @property
    def loss_reasons(self):
        """
        :return: :class:`LossReasonsService <basecrm.LossReasonsService>` object that gives you an access to all LossReason related actions.
        :rtype: basecrm.LossReasonsService
        """
        return self.__loss_reasons

    @property
    def notes(self):
        """
        :return: :class:`NotesService <basecrm.NotesService>` object that gives you an access to all Note related actions.
        :rtype: basecrm.NotesService
        """
        return self.__notes

    @property
    def orders(self):
        """
        :return: :class:`OrdersService <basecrm.OrdersService>` object that gives you an access to all Order related actions.
        :rtype: basecrm.OrdersService
        """
        return self.__orders

    @property
    def pipelines(self):
        """
        :return: :class:`PipelinesService <basecrm.PipelinesService>` object that gives you an access to all Pipeline related actions.
        :rtype: basecrm.PipelinesService
        """
        return self.__pipelines

    @property
    def products(self):
        """
        :return: :class:`ProductsService <basecrm.ProductsService>` object that gives you an access to all Product related actions.
        :rtype: basecrm.ProductsService
        """
        return self.__products

    @property
    def sources(self):
        """
        :return: :class:`SourcesService <basecrm.SourcesService>` object that gives you an access to all Source related actions.
        :rtype: basecrm.SourcesService
        """
        return self.__sources

    @property
    def stages(self):
        """
        :return: :class:`StagesService <basecrm.StagesService>` object that gives you an access to all Stage related actions.
        :rtype: basecrm.StagesService
        """
        return self.__stages

    @property
    def tags(self):
        """
        :return: :class:`TagsService <basecrm.TagsService>` object that gives you an access to all Tag related actions.
        :rtype: basecrm.TagsService
        """
        return self.__tags

    @property
    def tasks(self):
        """
        :return: :class:`TasksService <basecrm.TasksService>` object that gives you an access to all Task related actions.
        :rtype: basecrm.TasksService
        """
        return self.__tasks

    @property
    def text_messages(self):
        """
        :return: :class:`TextMessagesService <basecrm.TextMessagesService>` object that gives you an access to all TextMessage related actions.
        :rtype: basecrm.TextMessagesService
        """
        return self.__text_messages

    @property
    def users(self):
        """
        :return: :class:`UsersService <basecrm.UsersService>` object that gives you an access to all User related actions.
        :rtype: basecrm.UsersService
        """
        return self.__users

    @property
    def visits(self):
        """
        :return: :class:`VisitsService <basecrm.VisitsService>` object that gives you an access to all Visit related actions.
        :rtype: basecrm.VisitsService
        """
        return self.__visits

    @property
    def visit_outcomes(self):
        """
        :return: :class:`VisitOutcomesService <basecrm.VisitOutcomesService>` object that gives you an access to all VisitOutcome related actions.
        :rtype: basecrm.VisitOutcomesService
        """
        return self.__visit_outcomes

    @property
    def sync(self):
        """
        :return: :class:`SyncService <basecrm.SyncService>` object that gives you an access to all low-level Sync API related actions.
        :rtype: basecrm.SyncService
        """
        return self.__sync
|
basecrm/basecrm-python
|
basecrm/client.py
|
Python
|
mit
| 10,350
|
[
"VisIt"
] |
1e0ee313cbe07410ac8a35db90c0378e1c9bb64f261244d059f8fca04abd58b7
|
"""
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, in_preview_mode, _adjust_start_date_for_beta_testers
from courseware.courses import (
get_courses, get_course,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
)
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
from courseware.url_helpers import get_redirect_url
log = logging.getLogger("edx.courseware")

# Names made available to the accordion template (see render_accordion).
template_imports = {'urllib': urllib}

# Depth passed when loading course trees and locating current children.
CONTENT_DEPTH = 2
def user_groups(user):
    """
    TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60  # cache expiration, in seconds

    group_names = cache.get(cache_key)
    if settings.DEBUG:
        # Kill caching on dev machines -- we switch groups a lot
        group_names = None

    if group_names is None:
        group_names = [group.name
                       for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, group_names, one_hour)

    return group_names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
    """
    Render the "find courses" page.  The course selection work is done in
    courseware.courses.
    """
    course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})

    courses_list = []
    if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
        courses_list = get_courses(request.user, request.META.get('HTTP_HOST'))

        sort_by_date = microsite.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"])
        if sort_by_date:
            courses_list = sort_by_start_date(courses_list)
        else:
            courses_list = sort_by_announcement(courses_list)

    context = {
        'courses': courses_list,
        'course_discovery_meanings': course_discovery_meanings,
    }
    return render_to_response("courseware/courses.html", context)
def render_accordion(request, course, chapter, section, field_data_cache):
    """
    Render the courseware navigation accordion and return it as an HTML
    string.

    `chapter` and `section` (url_names) indicate the current position in
    the accordion; when '' or None, a default accordion is drawn.
    """
    # grab the table of contents
    toc = toc_for_course(request, course, chapter, section, field_data_cache)

    context = dict(template_imports)
    context.update({
        'toc': toc,
        'course_id': course.id.to_deprecated_string(),
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    })
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
    """
    Return the display item of `xmodule` indicated by its saved position.

    When `xmodule` has no saved position (or the saved position is out of
    range), the first suitable child is returned instead -- "suitable"
    meaning, when `min_depth` is positive, a child whose own display items
    extend down at least `min_depth` levels.

    Returns None when `xmodule` has no ``position`` attribute or when no
    suitable child exists.
    """
    def _first_eligible(candidates):
        """First candidate satisfying the min_depth requirement, or None."""
        if not candidates:
            return None
        if not min_depth > 0:
            return candidates[0]
        deep_enough = [kid for kid in candidates
                       if kid.has_children_at_depth(min_depth - 1)
                       and kid.get_display_items()]
        return deep_enough[0] if deep_enough else None

    if not hasattr(xmodule, 'position'):
        return None

    children = xmodule.get_display_items()
    if xmodule.position is None:
        return _first_eligible(children)

    # position is 1-indexed.
    index = xmodule.position - 1
    if 0 <= index < len(children):
        return children[index]
    if children:
        # A position is saved but out of range: fall back to the default.
        return _first_eligible(children)
    return None
def redirect_to_course_position(course_module, content_depth):
    """
    Return a redirect to the user's current place in the course.

    First-time visitors (no saved position) are sent to
    COURSE/CHAPTER/SECTION; returning visitors are sent to COURSE/CHAPTER,
    where the view locates the current section and shows a message about
    reusing the stored position.  When no current position exists in the
    course or chapter, the first child is selected.  Raises Http404 when
    no chapter or section child can be found.
    """
    url_kwargs = {'course_id': course_module.id.to_deprecated_string()}

    chapter = get_current_child(course_module, min_depth=content_depth)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")
    url_kwargs['chapter'] = chapter.url_name

    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=url_kwargs))

    # No saved position: drill down to the first section as well.
    section = get_current_child(chapter, min_depth=content_depth - 1)
    if section is None:
        raise Http404("No section found when loading current position in course")
    url_kwargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=url_kwargs))
def save_child_position(seq_module, child_name):
    """
    Set `seq_module.position` to the 1-indexed position of the child whose
    location name matches `child_name` (a url_name), persisting only when
    the position actually changed.
    """
    for index, child in enumerate(seq_module.get_display_items(), start=1):
        if child.location.name != child_name:
            continue
        # Only save if position changed
        if index != seq_module.position:
            seq_module.position = index
            # Save this new position to the underlying KeyValueStore
            seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule):
    """
    Walk up the course tree starting from a leaf, saving each node's
    position within its parent as it goes (stops at the root, which has no
    parent).
    """
    node = xmodule
    while node:
        parent_location = modulestore().get_parent_location(node.location)
        parent = None
        if parent_location:
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user, request, parent_descriptor, field_data_cache,
                node.location.course_key)

        # Only sequence-like parents track a position.
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, node.location.name)

        node = parent
def chat_settings(course, user):
    """
    Build the settings dict required to connect to the Jabber chat server
    and course room, or return None (after logging a warning) when
    JABBER_DOMAIN is not configured.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    user_jid = "{USER}@{DOMAIN}".format(USER=user.username, DOMAIN=domain)
    return {
        'domain': domain,

        # Jabber doesn't like slashes, so replace with dashes
        'room': "{ID}_class".format(ID=course.id.replace('/', '-')),

        'username': user_jid,

        # TODO: clearly this needs to be something other than the username
        #       should also be something that's not necessarily tied to a
        #       particular course
        'password': user_jid,
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if they
    are invalid.

    If section is not specified, displays the accordion opened to the right
    chapter.

    If neither chapter or section are specified, redirects to user's most
    recent chapter, or the first chapter if this is the user's first visit.

    Arguments:

     - request    : HTTP request
     - course_id  : course id (str: ORG/course/URL_NAME)
     - chapter    : chapter url_name (str)
     - section    : section url_name (str)
     - position   : position in module, eg of <sequential> module (str)

    Returns:

     - HTTPresponse
    """
    course_key = CourseKey.from_string(course_id)

    # Re-fetch the user with groups prefetched so later group lookups do
    # not issue additional queries.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)

    # Registration codes this user has already redeemed for this course;
    # used below to decide whether the course is payment-blocked.
    redeemed_registration_codes = CourseRegistrationCode.objects.filter(
        course_id=course_key,
        registrationcoderedemption__redeemed_by=request.user
    )

    # Redirect to dashboard if the course is blocked due to non-payment.
    if is_course_blocked(request, redeemed_registration_codes, course_key):
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        log.warning(
            u'User %s cannot access the course %s because payment has not yet been received',
            user,
            course_key.to_deprecated_string()
        )
        return redirect(reverse('dashboard'))

    request.user = user  # keep just one instance of User
    # Batch modulestore reads for the whole page render.
    with modulestore().bulk_operations(course_key):
        return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
    """
    Render the index page for the specified course.

    Flow: validate ``position``, require enrollment, enforce
    prerequisite / entrance-exam / survey gating, then render the
    requested chapter+section, a "welcome back" page (section omitted),
    or the bare accordion (empty course / chapter omitted). Unexpected
    errors render a friendly error page unless settings.DEBUG is set.
    """
    # Verify that position a string is in fact an int
    if position is not None:
        try:
            int(position)
        except ValueError:
            raise Http404(u"Position {} is not an integer!".format(position))
    user = request.user
    course = get_course_with_access(user, 'load', course_key, depth=2)
    staff_access = has_access(user, 'staff', course)
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
        return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
    # see if all pre-requisites (as per the milestones app feature) have been fulfilled
    # Note that if the pre-requisite feature flag has been turned off (default) then this check will
    # always pass
    if not has_access(user, 'view_courseware_with_prerequisites', course):
        # prerequisites have not been fulfilled therefore redirect to the Dashboard
        log.info(
            u'User %d tried to view course %s '
            u'without fulfilling prerequisites',
            user.id, unicode(course.id))
        return redirect(reverse('dashboard'))
    # Entrance Exam Check
    # If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
    # the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
    if chapter and course_has_entrance_exam(course):
        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
                and user_must_complete_entrance_exam(request, user, course):
            log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
            return redirect(reverse('courseware', args=[unicode(course.id)]))
    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))
    # masquerade lets staff view the course as a student would see it.
    masquerade = setup_masquerade(request, course_key, staff_access)
    # Everything below is wrapped so that unexpected errors produce a
    # friendly error page instead of a 500 (see the except clause).
    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key, user, course, depth=2)
        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course_key)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
        studio_url = get_studio_url(course, 'course')
        # Base template context; 'fragment' is filled in below when a
        # section (or the welcome-back page) is rendered.
        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'studio_url': studio_url,
            'masquerade': masquerade,
            'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
        }
        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True
        has_content = course.has_children_at_depth(CONTENT_DEPTH)
        if not has_content:
            # Show empty courseware for a course with no units
            return render_to_response('courseware/courseware.html', context)
        elif chapter is None:
            # Check first to see if we should instead redirect the user to an Entrance Exam
            if course_has_entrance_exam(course):
                exam_chapter = get_entrance_exam_content(request, course)
                if exam_chapter:
                    exam_section = None
                    if exam_chapter.get_children():
                        exam_section = exam_chapter.get_children()[0]
                        if exam_section:
                            return redirect('courseware_section',
                                            course_id=unicode(course_key),
                                            chapter=exam_chapter.url_name,
                                            section=exam_section.url_name)
            # passing CONTENT_DEPTH avoids returning 404 for a course with an
            # empty first section and a second section with content
            return redirect_to_course_position(course_module, CONTENT_DEPTH)
        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False
        context['show_chat'] = show_chat
        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor is not None:
            # Remember which chapter the user is visiting.
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))
        chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masquerade and masquerade.role == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masquerading as student: no chapter %s', chapter)
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            raise Http404
        if course_has_entrance_exam(course):
            # Message should not appear outside the context of entrance exam subsection.
            # if section is none then we don't need to show message on welcome back screen also.
            if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
                context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
                context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)
        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masquerade and masquerade.role == 'student':  # don't 404 if staff is masquerading as student
                    log.debug('staff masquerading as student: no section %s', section)
                    return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
                raise Http404
            ## Allow chromeless operation
            # 'chrome' is a comma-separated list of UI elements the section
            # wants shown; anything missing is disabled in the context.
            if section_descriptor.chrome:
                chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    context['disable_tabs'] = True
            if section_descriptor.default_tab:
                context['default_tab'] = section_descriptor.default_tab
            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)
            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            field_data_cache.add_descriptor_descendents(
                section_descriptor, depth=None
            )
            section_module = get_module_for_descriptor(
                request.user,
                request,
                section_descriptor,
                field_data_cache,
                course_key,
                position
            )
            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404
            # Save where we are in the chapter
            save_child_position(chapter_module, section)
            context['fragment'] = section_module.render(STUDENT_VIEW)
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            studio_url = get_studio_url(course, 'course')
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user.
                # Clearing out the last-visited state and showing "first-time" view by redirecting
                # to courseware.
                course_module.position = None
                course_module.save()
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            prev_section_url = reverse('courseware_section', kwargs={
                'course_id': course_key.to_deprecated_string(),
                'chapter': chapter_descriptor.url_name,
                'section': prev_section.url_name
            })
            # "Welcome back" page pointing at the last section visited.
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'studio_url': studio_url,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))
        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        # Doesn't bar Unicode characters from URL, but if Unicode characters do
        # cause an error it is a graceful failure.
        if isinstance(e, UnicodeEncodeError):
            raise Http404("URL contains Unicode characters")
        if isinstance(e, Http404):
            # let it propagate
            raise
        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception(
                u"Error in index view: user={user}, course={course}, chapter={chapter}"
                u" section={section} position={position}".format(
                    user=user,
                    course=course,
                    chapter=chapter,
                    section=section,
                    position=position
                ))
            try:
                result = render_to_response('courseware/courseware-error.html', {
                    'staff_access': staff_access,
                    'course': course
                })
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise
    return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
    """
    Short form of jump_to that takes only the id of the element, which is
    assumed to be unique within the course_id namespace.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    items = modulestore().get_items(course_key, qualifiers={'name': module_id})

    if not items:
        raise Http404(
            u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
                module_id, course_id, request.META.get("HTTP_REFERER", "")
            ))
    if len(items) > 1:
        # Ambiguous id: warn, then fall through to the first match.
        log.warning(
            u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
                module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.to_deprecated_string()
            ))

    return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
    """
    Redirect to the page that contains ``location`` within ``course_id``.

    404s when either key is invalid, the item does not exist, or it is
    not reachable within any class; the index view it redirects to then
    decides what this user may actually see.
    """
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(location).replace(course_key=course_key)
    except InvalidKeyError:
        raise Http404(u"Invalid course_key or usage_key")

    try:
        target = get_redirect_url(course_key, usage_key)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(usage_key))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(usage_key))

    return redirect(target)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        course = get_course_with_access(request.user, 'load', course_key)

        # An outstanding entrance exam takes the user straight to it
        # before any other part of the course is reachable.
        if user_must_complete_entrance_exam(request, request.user, course):
            return redirect(reverse('courseware', args=[unicode(course.id)]))

        # Likewise, a required, unanswered course survey blocks access.
        if request.user.is_authenticated() and survey.utils.must_answer_survey(course, request.user):
            return redirect(reverse('course_survey', args=[unicode(course.id)]))

        staff_access = has_access(request.user, 'staff', course)
        masquerade = setup_masquerade(request, course_key, staff_access)  # allow staff to masquerade on the info page

        # Where a prospective student should go to enroll: the about page
        # normally, or the marketing site when one is configured.
        if settings.FEATURES.get('ENABLE_MKTG_SITE'):
            url_to_enroll = marketing_link('COURSES')
        else:
            url_to_enroll = reverse(course_about, args=[course_id])

        show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(request.user, course.id)

        context = {
            'request': request,
            'course_id': course_key.to_deprecated_string(),
            'cache': None,
            'course': course,
            'staff_access': staff_access,
            'masquerade': masquerade,
            'studio_url': get_studio_url(course, 'course_info'),
            'show_enroll_banner': show_enroll_banner,
            'url_to_enroll': url_to_enroll,
        }

        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(request.user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Hide the student view while the course is not yet visible
            # to students (before its effective start date).
            context['disable_student_access'] = True

        return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
    """
    Display the courses tab with the given name.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
    if tab is None:
        raise Http404

    contents = get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404

    context = {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
    }
    return render_to_response('courseware/static_tab.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    context = {
        'course': course,
        'staff_access': has_access(request.user, 'staff', course),
    }
    return render_to_response('courseware/syllabus.html', context)
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False
    """
    # Anonymous / missing users are never registered.
    if user is None:
        return False
    if not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
def get_cosmetic_display_price(course, registration_price):
    """
    Return Course Price as a string preceded by correct currency, or 'Free'
    """
    # A positive registration price overrides the cosmetic one.
    price = registration_price if registration_price > 0 else course.cosmetic_display_price
    if not price:
        # Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
        return _('Free')

    currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    # Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
    # numerical amount in that currency. Adjust this display as needed for your language.
    return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
    """
    Display the course's about page.

    Assumes the course_id is in a valid format. Gathers enrollment,
    pricing/shopping-cart, and prerequisite information needed by the
    about-page template.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        # Visibility permission is microsite-overridable.
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
        # With an external marketing site there is no LMS about page;
        # send the user to the course info page instead.
        if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
            return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
        registered = registered_for_course(course, request.user)
        staff_access = has_access(request.user, 'staff', course)
        studio_url = get_studio_url(course, 'settings/details')
        # Users who can load the course land on the info page; others stay on about.
        if has_access(request.user, 'load', course):
            course_target = reverse('info', args=[course.id.to_deprecated_string()])
        else:
            course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
        show_courseware_link = (
            (
                has_access(request.user, 'load', course)
                and has_access(request.user, 'view_courseware_with_prerequisites', course)
            )
            or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
        )
        # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
        registration_price = 0
        in_cart = False
        reg_then_add_to_cart_link = ""
        _is_shopping_cart_enabled = is_shopping_cart_enabled()
        if _is_shopping_cart_enabled:
            registration_price = CourseMode.min_course_price_for_currency(course_key,
                                                                          settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
            if request.user.is_authenticated():
                cart = shoppingcart.models.Order.get_cart_for_user(request.user)
                in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
                    shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
            reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
                reg_url=reverse('register_user'), course_id=urllib.quote(str(course_id)))
        course_price = get_cosmetic_display_price(course, registration_price)
        can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
        # Used to provide context to message to student if enrollment not allowed
        can_enroll = has_access(request.user, 'enroll', course)
        invitation_only = course.invitation_only
        is_course_full = CourseEnrollment.objects.is_course_full(course)
        # Register button should be disabled if one of the following is true:
        # - Student is already registered for course
        # - Course is already full
        # - Student cannot enroll in course
        active_reg_button = not(registered or is_course_full or not can_enroll)
        is_shib_course = uses_shib(course)
        # get prerequisite courses display names
        pre_requisite_courses = get_prerequisite_courses_display(course)
        return render_to_response('courseware/course_about.html', {
            'course': course,
            'staff_access': staff_access,
            'studio_url': studio_url,
            'registered': registered,
            'course_target': course_target,
            'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
            'course_price': course_price,
            'in_cart': in_cart,
            'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
            'show_courseware_link': show_courseware_link,
            'is_course_full': is_course_full,
            'can_enroll': can_enroll,
            'invitation_only': invitation_only,
            'active_reg_button': active_reg_button,
            'is_shib_course': is_shib_course,
            # We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefore explicitly set to render the appropriate header.
            'disable_courseware_header': True,
            'can_add_course_to_cart': can_add_course_to_cart,
            'cart_link': reverse('shoppingcart.views.show_cart'),
            'pre_requisite_courses': pre_requisite_courses
        })
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
    """
    This is the button that gets put into an iframe on the Drupal site.

    Renders a minimal register/access fragment for the external marketing
    site; shows a "Coming Soon" page when the course does not exist yet.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        # Visibility permission is microsite-overridable.
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
    except (ValueError, Http404):
        # If a course does not exist yet, display a "Coming Soon" button
        return render_to_response(
            'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
        )
    registered = registered_for_course(course, request.user)
    # Users who can load the course land on the info page; others on about.
    if has_access(request.user, 'load', course):
        course_target = reverse('info', args=[course.id.to_deprecated_string()])
    else:
        course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
    allow_registration = has_access(request.user, 'enroll', course)
    show_courseware_link = (has_access(request.user, 'load', course) or
                            settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
    course_modes = CourseMode.modes_for_course_dict(course.id)
    context = {
        'course': course,
        'registered': registered,
        'allow_registration': allow_registration,
        'course_target': course_target,
        'show_courseware_link': show_courseware_link,
        'course_modes': course_modes,
    }
    # The edx.org marketing site currently displays only in English.
    # To avoid displaying a different language in the register / access button,
    # we force the language to English.
    # However, OpenEdX installations with a different marketing front-end
    # may want to respect the language specified by the user or the site settings.
    force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
    if force_english:
        translation.activate('en-us')
    if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
        # Drupal will pass organization names using a GET parameter, as follows:
        #   ?org=Harvard
        #   ?org=Harvard,MIT
        # If no full names are provided, the marketing iframe won't show the
        # email opt-in checkbox.
        org = request.GET.get('org')
        if org:
            org_list = org.split(',')
            # HTML-escape the provided organization names
            org_list = [cgi.escape(org) for org in org_list]
            if len(org_list) > 1:
                if len(org_list) > 2:
                    # Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
                    org_name_string = _("{first_institutions}, and {last_institution}").format(
                        first_institutions=u", ".join(org_list[:-1]),
                        last_institution=org_list[-1]
                    )
                else:
                    # Translators: The join of two institution names (e.g., Harvard and MIT).
                    org_name_string = _("{first_institution} and {second_institution}").format(
                        first_institution=org_list[0],
                        second_institution=org_list[1]
                    )
            else:
                org_name_string = org_list[0]
            context['checkbox_label'] = ungettext(
                "I would like to receive email from {institution_series} and learn about its other programs.",
                "I would like to receive email from {institution_series} and learn about their other programs.",
                len(org_list)
            ).format(institution_series=org_name_string)
    try:
        return render_to_response('courseware/mktg_course_about.html', context)
    finally:
        # Just to be safe, reset the language if we forced it to be English.
        if force_english:
            translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
    """
    Thin wrapper around _progress that runs it inside a bulk modulestore
    operation and a manual transaction, just in case there are
    unanticipated errors.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key), grades.manual_transaction():
        return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
    """
    Unwrapped version of "progress".

    Render the grade bar and every problem score for a student. Course
    staff may view any enrolled student's progress; everyone else may
    only view their own.
    """
    course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)

    # A required, unanswered course survey blocks access.
    if survey.utils.must_answer_survey(course, request.user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))

    staff_access = has_access(request.user, 'staff', course)

    if student_id is None or student_id == request.user.id:
        # Always allowed to see your own profile.
        student = request.user
    elif not staff_access:
        # Only staff may look at another student's progress.
        raise Http404
    else:
        try:
            student = User.objects.get(id=student_id)
        except (ValueError, User.DoesNotExist):
            # ValueError covers a student_id that isn't an integer.
            raise Http404

    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.
    # Pre-fetch groups so auth checks don't need an extra DB lookup
    # (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)

    courseware_summary = grades.progress_summary(student, request, course)
    studio_url = get_studio_url(course, 'settings/grading')
    grade_summary = grades.grade(student, request, course)

    if courseware_summary is None:
        # The student didn't have access to the course the instructor requested.
        raise Http404

    # Is certificate generation configured for this course run?
    show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)

    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'studio_url': studio_url,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'passed': is_course_passed(course, grade_summary),
        'show_generate_cert_btn': show_generate_cert_btn
    }

    if show_generate_cert_btn:
        context.update(certs_api.certificate_downloadable_status(student, course_key))
        # Show the certificate web-view button only when the feature flag
        # is enabled AND the course has an active web certificate.
        if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
            if certs_api.get_active_web_certificate(course) is not None:
                context.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': u'{url}'.format(
                        url=certs_api.get_certificate_url(user_id=student.id, course_id=unicode(course.id))
                    )
                })
            else:
                context.update({
                    'is_downloadable': False,
                    'is_generating': True,
                    'download_url': None
                })

    with grades.manual_transaction():
        return render_to_response('courseware/progress.html', context)
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.

    Right now this only works for problems because that's all
    StudentModuleHistory records.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    try:
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid location.')))

    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = has_access(request.user, 'staff', course)

    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied

    try:
        student = User.objects.get(username=student_username)
        student_module = StudentModule.objects.get(
            course_id=course_key,
            module_state_key=usage_key,
            student_id=student.id
        )
    except User.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))
    except StudentModule.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
            username=student_username,
            location=location
        )))

    entries = StudentModuleHistory.objects.filter(
        student_module=student_module
    ).order_by('-id')
    if not entries:
        # No history yet: force a save to get history started, then re-query.
        student_module.save()
        entries = StudentModuleHistory.objects.filter(
            student_module=student_module
        ).order_by('-id')

    return render_to_response('courseware/submission_history.html', {
        'history_entries': entries,
        'username': student.username,
        'location': location,
        'course_id': course_key.to_deprecated_string()
    })
def notification_image_for_tab(course_tab, user, course):
    """
    Returns the notification image path for the given course_tab if applicable, otherwise None.
    """
    # NOTE(review): the keys here are tab *types* while the lookup below
    # uses course_tab.name — confirm these coincide for the grading tabs.
    handlers = {
        StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
        PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
        OpenEndedGradingTab.type: open_ended_notifications.combined_notifications,
    }

    handler = handlers.get(course_tab.name)
    if handler is not None:
        notifications = handler(course, user)
        if notifications and notifications['pending_grading']:
            return notifications['img_path']
    return None
def get_static_tab_contents(request, course, tab):
    """
    Returns the contents for the given static tab
    """
    loc = course.id.make_usage_key(
        tab.type,
        tab.url_slug,
    )
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, modulestore().get_item(loc), depth=0
    )
    tab_module = get_module(
        request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path
    )

    logging.debug('course_module = {0}'.format(tab_module))

    if tab_module is None:
        return ''

    try:
        return tab_module.render(STUDENT_VIEW).content
    except Exception:  # pylint: disable=broad-except
        # Render a generic error fragment rather than failing the whole page.
        html = render_to_string('courseware/error-message.html', None)
        log.exception(
            u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
        )
        return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
    """
    View that, given a course_id, returns the a JSON object that enumerates all of the LTI endpoints for that course.

    The LTI 2.0 result service spec at
    http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
    says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
    function implements one way of discovering these endpoints, returning a JSON array when accessed.

    Arguments:
        request (django request object):  the HTTP request object that triggered this view function
        course_id (unicode):  id associated with the course

    Returns:
        (django response object):  HTTP response.  404 if course is not found, otherwise 200 with JSON body.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    try:
        course = get_course(course_key, depth=2)
    except ValueError:
        return HttpResponse(status=404)

    anonymous_user = AnonymousUser()
    anonymous_user.known = False  # make these "noauth" requests like module_render.handle_xblock_callback_noauth

    # Instantiate a module for every LTI descriptor in the course, under
    # the anonymous user, so we can ask each one for its service URLs.
    modules = []
    for descriptor in modulestore().get_items(course.id, qualifiers={'category': 'lti'}):
        cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key,
            anonymous_user,
            descriptor
        )
        modules.append(
            get_module_for_descriptor(anonymous_user, request, descriptor, cache, course_key)
        )

    endpoints = []
    for module in modules:
        endpoints.append({
            'display_name': module.display_name,
            'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
                service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
            'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
                service_name='grade_handler'),
        })

    return HttpResponse(json.dumps(endpoints), content_type='application/json')
@login_required
def course_survey(request, course_id):
    """
    URL endpoint that presents the survey associated with ``course_id``.

    The survey rendering itself is delegated to ``view_student_survey`` in
    the Survey djangoapp; this view only resolves the course and decides
    whether a survey is configured at all.
    """
    key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', key)
    fallback_url = reverse('info', args=[course_id])
    survey_name = course.course_survey_name
    # No survey configured for this course: send the student back to the
    # course info page instead.
    if not survey_name:
        return redirect(fallback_url)
    return survey.views.view_student_survey(
        request.user,
        survey_name,
        course=course,
        redirect_url=fallback_url,
        is_required=course.course_survey_required,
    )
def is_course_passed(course, grade_summary=None, student=None, request=None):
    """
    Check the user's passing status for a course.

    Arguments:
        course : course object
        grade_summary (dict) : student grade details; computed on demand
            from ``student``/``request`` when not supplied.
        student : user object
        request (HttpRequest)

    Returns:
        True/False when the course defines at least one nonzero grade
        cutoff; a falsy None when it defines none.
    """
    positive_cutoffs = [
        cutoff for cutoff in course.grade_cutoffs.values() if cutoff > 0
    ]
    if grade_summary is None:
        grade_summary = grades.grade(student, request, course)
    if not positive_cutoffs:
        # Mirrors the original short-circuit `success_cutoff and ...`,
        # which evaluates to None when there is no cutoff.
        return None
    # The lowest nonzero cutoff is the passing mark.
    return grade_summary['percent'] >= min(positive_cutoffs)
@require_POST
def generate_user_cert(request, course_id):
    """Start generating a new certificate for the user.

    Certificate generation is allowed if:
    * The user has passed the course, and
    * The user does not already have a pending/completed certificate.

    Note that if an error occurs during certificate generation
    (for example, if the queue is down), then we simply mark the
    certificate generation task status as "error" and re-run
    the task with a management command. To students, the certificate
    will appear to be "generating" until it is re-run.

    Args:
        request (HttpRequest): The POST request to this view.
        course_id (unicode): The identifier for the course.

    Returns:
        HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
    """
    # NOTE(review): pre-Django-1.10 style — is_authenticated is still a
    # callable here, not a property.
    if not request.user.is_authenticated():
        log.info(u"Anon user trying to generate certificate for %s", course_id)
        return HttpResponseBadRequest(
            _('You must be signed in to {platform_name} to create a certificate.').format(
                platform_name=settings.PLATFORM_NAME
            )
        )
    student = request.user
    course_key = CourseKey.from_string(course_id)
    # depth=2 preloads part of the course tree used by the grading pass below.
    course = modulestore().get_course(course_key, depth=2)
    if not course:
        return HttpResponseBadRequest(_("Course is not valid"))
    # Certificates are only issued to students who have passed the course.
    if not is_course_passed(course, None, student, request):
        return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
    certificate_status = certs_api.certificate_downloadable_status(student, course.id)
    if certificate_status["is_generating"]:
        return HttpResponseBadRequest(_("Certificate is being created."))
    else:
        # If the certificate is not already in-process or completed,
        # then create a new certificate generation task.
        # If the certificate cannot be added to the queue, this will
        # mark the certificate with "error" status, so it can be re-run
        # with a management command. From the user's perspective,
        # it will appear that the certificate task was submitted successfully.
        certs_api.generate_user_certificates(student, course.id)
        _track_successful_certificate_generation(student.id, course.id)
        return HttpResponse()
def _track_successful_certificate_generation(user_id, course_id):  # pylint: disable=invalid-name
    """
    Track a successful certificate generation event in Segment analytics.

    Arguments:
        user_id (str): The ID of the user generating the certificate.
        course_id (CourseKey): Identifier for the course.

    Returns:
        None
    """
    # Only emit the event when Segment analytics is configured for the LMS.
    if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
        event_name = 'edx.bi.user.certificate.generate'  # pylint: disable=no-member
        tracking_context = tracker.get_tracker().resolve_context()  # pylint: disable=no-member
        analytics.track(
            user_id,
            event_name,
            {
                'category': 'certificates',
                'label': unicode(course_id)
            },
            context={
                # Forward the GA client id so the event joins the user's
                # existing Google Analytics session.
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
@require_http_methods(["GET", "POST"])
def render_xblock(request, usage_key_string, check_if_enrolled=True):
    """
    Returns an HttpResponse with HTML content for the xBlock with the given usage_key.

    The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).

    Arguments:
        request (HttpRequest): the incoming GET or POST request
        usage_key_string (str): serialized UsageKey of the block to render
        check_if_enrolled (bool): when True, the access check also requires enrollment
    """
    usage_key = UsageKey.from_string(usage_key_string)
    # Normalize the key: deprecated keys may be missing the course run.
    usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
    course_key = usage_key.course_key
    # Batch modulestore reads for the duration of the render.
    with modulestore().bulk_operations(course_key):
        # verify the user has access to the course, including enrollment check
        course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
        # get the block, which verifies whether the user has access to the block.
        block, _ = get_module_by_usage_id(
            request, unicode(course_key), unicode(usage_key), disable_staff_debug_info=True
        )
        # "Chromeless" context: the template suppresses the normal courseware
        # navigation so the block can be shown standalone (e.g. in an iframe).
        context = {
            'fragment': block.render('student_view', context=request.GET),
            'course': course,
            'disable_accordion': True,
            'allow_iframing': True,
            'disable_header': True,
            'disable_window_wrap': True,
            'disable_preview_menu': True,
            'staff_access': has_access(request.user, 'staff', course),
            'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
        }
        return render_to_response('courseware/courseware-chromeless.html', context)
|
rhndg/openedx
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 56,833
|
[
"VisIt"
] |
10d174d1c5319f0c9346827db00182c3bdf33e136500c7da889b6eb337a59754
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2009 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Stock transfer wizard definition """
from decimal import Decimal
import gtk
from kiwi.datatypes import ValidationError
from kiwi.ui.objectlist import Column
from stoqlib.api import api
from stoqlib.domain.fiscal import Invoice
from stoqlib.domain.person import Branch, Employee
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.transfer import TransferOrder, TransferOrderItem
from stoqlib.domain.views import ProductWithStockBranchView
from stoqlib.gui.base.columns import AccessorColumn
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.base.wizards import (BaseWizard, WizardEditorStep)
from stoqlib.gui.dialogs.batchselectiondialog import BatchDecreaseSelectionDialog
from stoqlib.gui.dialogs.missingitemsdialog import (get_missing_items,
MissingItemsDialog)
from stoqlib.gui.editors.transfereditor import TransferItemEditor
from stoqlib.gui.events import StockTransferWizardFinishEvent
from stoqlib.gui.utils.printing import print_report
from stoqlib.gui.wizards.abstractwizard import SellableItemStep
from stoqlib.lib.formatters import format_sellable_description
from stoqlib.lib.message import warning, yesno
from stoqlib.lib.pluginmanager import get_plugin_manager
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.transfer import TransferOrderReceipt
_ = stoqlib_gettext
#
# Wizard steps
#
class StockTransferInitialStep(WizardEditorStep):
    """First wizard step: collects the transfer header data.

    The user picks the destination branch, the responsible employee and
    (when the NF-e plugin is active) a mandatory invoice number.
    """
    gladefile = 'StockTransferInitialStep'
    model_type = TransferOrder
    proxy_widgets = ['open_date',
                     'destination_branch',
                     'source_responsible',
                     'invoice_number',
                     'comments']
    def __init__(self, wizard, store, model):
        # Cache the branch the logged-in user operates from: it is the
        # transfer source and is used to validate the destination.
        self.branch = api.get_current_branch(store)
        WizardEditorStep.__init__(self, store, wizard, model)
    def setup_proxies(self):
        self._setup_widgets()
        self.proxy = self.add_proxy(self.wizard.model, self.proxy_widgets)
        # Force the user to select a branch, avoiding transfering to the wrong
        # branch by mistake
        self.destination_branch.update(None)
    def _setup_widgets(self):
        branches = Branch.get_active_remote_branches(self.store)
        self.destination_branch.prefill(api.for_person_combo(branches))
        self.source_branch.set_text(self.branch.get_description())
        employees = self.store.find(Employee)
        self.source_responsible.prefill(api.for_person_combo(employees))
        manager = get_plugin_manager()
        nfe_is_active = manager.is_active('nfe')
        # Electronic invoices (NF-e) require an invoice number up front.
        self.invoice_number.set_property('mandatory', nfe_is_active)
        # Set an initial invoice number to TransferOrder and Invoice
        if not self.model.invoice_number:
            new_invoice_number = Invoice.get_next_invoice_number(self.store)
            self.model.invoice_number = new_invoice_number
    def _validate_destination_branch(self):
        # Transfers are only allowed between branches of the same company
        # (same CNPJ root).
        other_branch = self.destination_branch.read()
        if not self.branch.is_from_same_company(other_branch):
            warning(_(u"Branches are not from the same CNPJ"))
            return False
        return True
    #
    # WizardStep hooks
    #
    def post_init(self):
        self.register_validate_function(self.wizard.refresh_next)
        self.force_validation()
    def has_next_step(self):
        return True
    def next_step(self):
        return StockTransferItemStep(self.wizard, self, self.store,
                                     self.wizard.model)
    def validate_step(self):
        return self._validate_destination_branch()
    def on_invoice_number__validate(self, widget, value):
        # kiwi signal handler: validates the invoice-number entry widget.
        if not 0 < value <= 999999999:
            return ValidationError(
                _("Invoice number must be between 1 and 999999999"))
        invoice = self.model.invoice
        branch = self.model.branch
        # NOTE(review): presumably a truthy return means "number already
        # used" — confirm check_unique_invoice_number_by_branch semantics.
        if invoice.check_unique_invoice_number_by_branch(value, branch):
            return ValidationError(_(u'Invoice number already used.'))
class StockTransferItemStep(SellableItemStep):
    """Second wizard step: select the sellables and quantities to transfer."""
    model_type = TransferOrder
    item_table = TransferOrderItem
    sellable_view = ProductWithStockBranchView
    batch_selection_dialog = BatchDecreaseSelectionDialog
    # Quantities must not exceed the stock available at the source branch.
    validate_stock = True
    cost_editable = False
    item_editor = TransferItemEditor
    def __init__(self, wizard, previous, store, model):
        manager = get_plugin_manager()
        nfe_is_active = manager.is_active('nfe')
        # NF-e transfers must carry real costs, so allow editing them.
        if nfe_is_active:
            self.cost_editable = True
        SellableItemStep.__init__(self, wizard, previous, store, model)
    #
    # SellableItemStep hooks
    #
    def get_sellable_view_query(self):
        sellable_query = Sellable.get_unblocked_sellables_query(self.store,
                                                                storable=False)
        return self.sellable_view, sellable_query
    def get_saved_items(self):
        return list(self.model.get_items())
    def get_order_item(self, sellable, cost, quantity, batch=None):
        return self.model.add_sellable(sellable, batch, quantity, cost)
    def get_columns(self):
        # Columns for the item list: description, stock at source,
        # quantity being transferred, and the stock left after transfer.
        return [
            Column('sellable.description', title=_(u'Description'),
                   data_type=str, expand=True, searchable=True,
                   format_func=self._format_description, format_func_data=True),
            AccessorColumn('stock', title=_(u'Stock'), data_type=Decimal,
                           accessor=self._get_stock_quantity, width=80),
            Column('quantity', title=_(u'Transfer'), data_type=Decimal,
                   width=100),
            AccessorColumn('total', title=_(u'Total'), data_type=Decimal,
                           accessor=self._get_total_quantity, width=80),
        ]
    def _format_description(self, item, data):
        return format_sellable_description(item.sellable, item.batch)
    def _get_stock_quantity(self, item):
        # Items whose product does not manage stock have nothing to show.
        if not item.sellable.product.manage_stock:
            return
        storable = item.sellable.product_storable
        stock_item = storable.get_stock_item(self.model.branch, item.batch)
        return stock_item.quantity or 0
    def _get_total_quantity(self, item):
        # "Total" is the stock remaining at the source after the transfer,
        # clamped at zero.
        if not item.sellable.product.manage_stock:
            return
        qty = self._get_stock_quantity(item)
        qty -= item.quantity
        if qty > 0:
            return qty
        return 0
    def _setup_summary(self):
        # No summary row is wanted for transfers.
        self.summary = None
    def sellable_selected(self, sellable, batch=None):
        SellableItemStep.sellable_selected(self, sellable, batch=batch)
        if sellable is None or not sellable.product.manage_stock:
            return
        storable = sellable.product_storable
        # FIXME: We should not have to override this method. This should
        # be done automatically on SellableItemStep
        self.stock_quantity.set_label(
            "%s" % storable.get_balance_for_branch(branch=self.model.branch))
    def setup_slaves(self):
        SellableItemStep.setup_slaves(self)
    #
    # WizardStep hooks
    #
    def post_init(self):
        self.hide_add_button()
        SellableItemStep.post_init(self)
    def has_next_step(self):
        # This is the final step of the wizard.
        return False
#
# Main wizard
#
class StockTransferWizard(BaseWizard):
    """Two-step wizard that creates and sends a stock TransferOrder."""
    title = _(u'Stock Transfer')
    size = (750, 350)
    def __init__(self, store):
        self.model = self._create_model(store)
        first_step = StockTransferInitialStep(self, store, self.model)
        BaseWizard.__init__(self, store, first_step, self.model)
    def _create_model(self, store):
        user = api.get_current_user(store)
        source_responsible = store.find(Employee, person=user.person).one()
        # Default the destination to the first remote branch; the initial
        # wizard step forces the user to re-select it explicitly.
        return TransferOrder(
            source_branch=api.get_current_branch(store),
            source_responsible=source_responsible,
            destination_branch=Branch.get_active_remote_branches(store)[0],
            store=store)
    def _receipt_dialog(self, order):
        msg = _('Would you like to print a receipt for this transfer?')
        if yesno(msg, gtk.RESPONSE_YES, _("Print receipt"), _("Don't print")):
            print_report(TransferOrderReceipt, order)
    def finish(self):
        # Refuse to finish while any item lacks sufficient stock.
        missing = get_missing_items(self.model, self.store)
        if missing:
            run_dialog(MissingItemsDialog, self, self.model, missing)
            return False
        self.model.send()
        self.retval = self.model
        self.close()
        StockTransferWizardFinishEvent.emit(self.model)
        # Commit before printing to avoid losing data if something breaks
        self.store.confirm(self.retval)
        self._receipt_dialog(self.model)
|
andrebellafronte/stoq
|
stoqlib/gui/wizards/stocktransferwizard.py
|
Python
|
gpl-2.0
| 9,677
|
[
"VisIt"
] |
45041a6f9032dfa23c3cd036847fa65b88bac857af0eeffa3118f35e54ca4476
|
from __future__ import annotations
import pytest
def test(dials_data):
    """Check that PixelToMillerIndex reproduces the predicted Miller indices."""
    from dxtbx.model.experiment_list import ExperimentListFactory
    from dials.algorithms.spot_prediction import PixelToMillerIndex
    from dials.array_family import flex

    experiments_path = (
        dials_data("centroid_test_data").join("experiments.json").strpath
    )
    experiments = ExperimentListFactory.from_json_file(experiments_path)
    experiment = experiments[0]
    pixel_to_miller = PixelToMillerIndex(
        experiment.beam,
        experiment.detector,
        experiment.goniometer,
        experiment.scan,
        experiment.crystal,
    )
    predictions = flex.reflection_table.from_predictions(experiment)
    for refl in predictions.rows():
        px, py, pz = refl["xyzcal.px"]
        # The fractional index recovered from the pixel coordinate must
        # match the predicted integer index to a tight absolute tolerance.
        recovered = pixel_to_miller.h(refl["panel"], px, py, pz)
        assert refl["miller_index"] == pytest.approx(recovered, abs=1e-7)
|
dials/dials
|
tests/algorithms/spot_prediction/test_pixel_to_miller_index.py
|
Python
|
bsd-3-clause
| 887
|
[
"CRYSTAL"
] |
d8efd8117d10b7ea312cd0d68a550c3407cd094e0051324401de0695cdcf9e7b
|
# http://www.mail-archive.com/pymol-users@lists.sourceforge.net/msg06211.html
from pymol import cmd
def print_hb(selection):
hb = cmd.find_pairs("((byres "+selection+") and n;n)","((byres "+selection+") and n;o)",mode=1,cutoff=3.7,angle=55)
pair1_list = []
pair2_list = []
dist_list = []
for pairs in hb:
cmd.iterate("%s and ID %s" % (pairs[0][0],pairs[0][1]), 'print "%s/%3s`%s/%s " % (chain,resn,resi,name),')
cmd.iterate("%s and ID %s" % (pairs[1][0],pairs[1][1]), 'print "%s/%3s`%s/%s " % (chain,resn,resi,name),')
print "%.2f" % cmd.dist("%s and ID %s" % (pairs[0][0],pairs[0][1]),"%s and ID %s" % (pairs[1][0],pairs[1][1]))
cmd.extend("print_hb",print_hb)
|
jtprince/hydrogen_bondifier
|
reference/campbell_find_hb.py
|
Python
|
mit
| 690
|
[
"PyMOL"
] |
05cfbc622339ae19bca666be8840a25718686edfdb5220eb7d7f0c48153a9b49
|
#!/usr/bin/env python2
#
# Copyright (C) 2020(H)
# Jozef Stefan Institute
# Max Planck Institute for Polymer Research
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestCaseLangevinThermostatOnGroup(unittest.TestCase):
    """Verify that LangevinThermostatOnGroup thermalizes only the particles
    that belong to its ParticleGroup, leaving all other particles untouched."""

    def setUp(self):
        # Minimal ESPResSo++ system: orthorhombic box with a fixed RNG seed
        # so the thermostat's random forces are reproducible.
        system = espressopp.System()
        box = (10, 10, 10)
        system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
        system.rng = espressopp.esutil.RNG(54321)
        system.skin = 0.3
        system.comm = MPI.COMM_WORLD
        self.system = system
        nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
        cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=0.3)
        system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
        # Five identical particles at rest; 1-3 will be thermostatted,
        # 4-5 serve as the untouched control group.
        particle_lists = [
            (1, espressopp.Real3D(0, 1, 2), espressopp.Real3D(0, 0, 0)),
            (2, espressopp.Real3D(0, 1, 2), espressopp.Real3D(0, 0, 0)),
            (3, espressopp.Real3D(0, 1, 2), espressopp.Real3D(0, 0, 0)),
            (4, espressopp.Real3D(0, 1, 2), espressopp.Real3D(0, 0, 0)),
            (5, espressopp.Real3D(0, 1, 2), espressopp.Real3D(0, 0, 0))
        ]
        self.thermo_group_pids = [1, 2, 3]
        self.non_thermo_group_pids = [4, 5]
        system.storage.addParticles(particle_lists, 'id', 'pos', 'v')
        self.thermo_group = espressopp.ParticleGroup(system.storage)
        for p in self.thermo_group_pids:
            self.thermo_group.add(p)
        self.integrator = espressopp.integrator.VelocityVerlet(system)
        self.integrator.dt = 0.001

    def test_thermalize_only_group(self):
        """Run thermostat only on particles in ParticleGroup."""
        langevin = espressopp.integrator.LangevinThermostatOnGroup(self.system, self.thermo_group)
        langevin.gamma = 1.0
        langevin.temperature = 1.0
        self.integrator.addExtension(langevin)
        self.integrator.run(1)
        # (The original built an unused `forces` list here; removed.)
        # Particles in the group must have picked up a random force...
        for p in self.thermo_group_pids:
            f = self.system.storage.getParticle(p).f
            self.assertNotEqual(f, espressopp.Real3D())
        # ...while particles outside the group stay force-free.
        for p in self.non_thermo_group_pids:
            f = self.system.storage.getParticle(p).f
            self.assertEqual(f, espressopp.Real3D())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
govarguz/espressopp
|
testsuite/langevin_thermostat_on_group/langevin_thermostat_on_group.py
|
Python
|
gpl-3.0
| 3,244
|
[
"ESPResSo"
] |
e8be2c94cf295b80e90ec8e62e7ccf826454ce1384537f7cbd5183cee1960c5f
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Multipliers for human-readable size suffixes (binary prefixes, powers of
# 1024), keyed by the single-letter suffix.
SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}
# Mapping of single-letter filesystem attribute flags (as used by
# lsattr/chattr) to their human-readable names.
FILE_ATTRIBUTES = {
    'A': 'noatime',
    'a': 'append',
    'c': 'compressed',
    'C': 'nocow',
    'd': 'nodump',
    'D': 'dirsync',
    'e': 'extents',
    'E': 'encrypted',
    'h': 'blocksize',
    'i': 'immutable',
    'I': 'indexed',
    'j': 'journalled',
    'N': 'inline',
    's': 'zero',
    'S': 'synchronous',
    't': 'notail',
    'T': 'blockroot',
    'u': 'undelete',
    'X': 'compressedraw',
    'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
# Optional logging backends: flags record which ones are importable here.
try:
    import syslog
    HAS_SYSLOG = True
except ImportError:
    # e.g. Windows or stripped-down interpreters lack the syslog module
    HAS_SYSLOG = False
try:
    from systemd import journal
    has_journal = True
except ImportError:
    # systemd python bindings are an optional dependency
    has_journal = False
# Optional SELinux support; callers check HAVE_SELINUX before using it.
HAVE_SELINUX = False
try:
    import selinux
    HAVE_SELINUX = True
except ImportError:
    pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
    from collections.abc import KeysView
    SEQUENCETYPE = (Sequence, KeysView)
except ImportError:
    # Python < 3.3 has no collections.abc module.
    # (The original used a bare `except:` here, which would also have
    # swallowed SystemExit/KeyboardInterrupt; only ImportError is expected.)
    SEQUENCETYPE = Sequence
try:
    import json
    # Detect the python-json library which is incompatible
    # Look for simplejson if that's the case
    try:
        if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
            raise ImportError
    except AttributeError:
        raise ImportError
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        # AnsibleModule is not available yet, so emit the error blob as raw
        # JSON on stdout for the controller to pick up.
        print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
        sys.exit(1)
    except SyntaxError:
        print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
        sys.exit(1)
# Map of algorithm name -> constructor for every hash usable on this
# interpreter, built from hashlib when available.
AVAILABLE_HASH_ALGORITHMS = dict()
try:
    import hashlib
    # python 2.7.9+ and 2.7.0+
    for attribute in ('available_algorithms', 'algorithms'):
        algorithms = getattr(hashlib, attribute, None)
        if algorithms:
            break
    if algorithms is None:
        # python 2.5+
        algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
    for algorithm in algorithms:
        AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
    # Python 2.4: fall back to the standalone sha/md5 modules.
    import sha
    AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
    try:
        import md5
        AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
    except ImportError:
        pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Heuristic for spotting parameter names that look like passwords, so their
# values can be hidden from logs.
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
    # Python 2
    unicode
except NameError:
    # Python 3
    unicode = text_type
try:
    # Python 2.6+
    bytes
except NameError:
    # Python 2.4
    bytes = binary_type
try:
    # Python 2
    basestring
except NameError:
    # Python 3
    basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
# Arguments shared by every module that manipulates files; modules that do
# not care about a given key simply ignore it.
FILE_COMMON_ARGUMENTS = dict(
    src=dict(),
    mode=dict(type='raw'),
    owner=dict(),
    group=dict(),
    seuser=dict(),
    serole=dict(),
    selevel=dict(),
    setype=dict(),
    follow=dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content=dict(no_log=True),
    backup=dict(),
    force=dict(),
    remote_src=dict(),  # used by assemble
    regexp=dict(),  # used by assemble
    delimiter=dict(),  # used by assemble
    directory_mode=dict(),  # used by copy
    unsafe_writes=dict(type='bool'),  # should be available to any module using atomic_move
    attributes=dict(aliases=['attr']),
)
# Matches CLI-style password option names (e.g. --password, -passwd).
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8)  # file mode permission bits
EXEC_PERM_BITS = int('00111', 8)  # execute permission bits
DEFAULT_PERM = int('0666', 8)  # default file permission bits
def get_platform():
    '''Return the platform (OS) name, e.g. ``Linux`` or ``Darwin``.'''
    # Thin wrapper around platform.system(), kept so callers do not have to
    # depend on the platform module directly.
    system_name = platform.system()
    return system_name
def get_distribution():
    ''' return the distribution name '''
    # NOTE(review): relies on platform._supported_dists (private) and
    # platform.dist()/linux_distribution(), all removed in newer Python 3
    # releases — this is Python 2 era code; confirm interpreter version
    # before reusing.
    if platform.system() == 'Linux':
        try:
            supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                # Distros such as Amazon Linux only ship /etc/system-release.
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; it should be narrowed.
            distribution = platform.dist()[0].capitalize()
    else:
        # Non-Linux platforms have no "distribution" concept here.
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version '''
    # NOTE(review): platform.linux_distribution()/platform.dist() were
    # removed in Python 3.8 — Python 2 era code; see get_distribution.
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                # Distros such as Amazon Linux only ship /etc/system-release.
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            # NOTE(review): bare except should be narrowed (see above).
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''
    Return every (direct and indirect) subclass of *cls*.

    ``cls.__subclasses__()`` only returns direct subclasses, so this walks
    the whole class tree. Used by modules like Hardware or Network fact
    classes.

    :arg cls: the class whose subclass tree is inspected
    :returns: list of subclass objects, in breadth-first discovery order
    '''
    # The original implementation removed elements from `to_visit` while
    # iterating over it, which skips entries within a pass and only works
    # because the outer `while` loop re-scans the remainder. A plain work
    # queue is both correct and clearer.
    subclasses = []
    to_visit = deque(cls.__subclasses__())
    while to_visit:
        sc = to_visit.popleft()
        subclasses.append(sc)
        # Queue this subclass's own subclasses for visiting.
        to_visit.extend(sc.__subclasses__())
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.

    Selection order: (1) a subclass matching both platform and distribution,
    (2) a subclass matching the platform with no distribution set,
    (3) the base class itself.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None
    # get the most specific superclass for this platform
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    if subclass is None:
        # Fall back to a platform-only match (subclass with no distribution).
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        # No platform-specific implementation exists: use the base class.
        subclass = cls
    # Instantiate the chosen subclass even though `cls` was what the caller
    # invoked; __init__ then runs on the returned instance as usual.
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert text keys and values to byte strings.

    Specialized for json return values: only the container types the json
    module produces (dict, list, tuple) are descended into; any other
    value is returned unchanged.
    '''
    def _convert(value):
        # Recurse with the same encoding/error policy.
        return json_dict_unicode_to_bytes(value, encoding, errors)

    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict((_convert(k), _convert(v)) for k, v in iteritems(d))
    if isinstance(d, list):
        return [_convert(item) for item in d]
    if isinstance(d, tuple):
        return tuple(_convert(item) for item in d)
    return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert byte-string keys and values to text.

    Specialized for json return values: only the container types the json
    module produces (dict, list, tuple) are descended into; any other
    value is returned unchanged.
    '''
    def _convert(value):
        # Recurse with the same encoding/error policy.
        return json_dict_bytes_to_unicode(value, encoding, errors)

    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict((_convert(k), _convert(v)) for k, v in iteritems(d))
    if isinstance(d, list):
        return [_convert(item) for item in d]
    if isinstance(d, tuple):
        return tuple(_convert(item) for item in d)
    return d
def return_values(obj):
    """ Return native stringified values from datastructures.

    For use with removing sensitive values pre-jsonification.
    Yields each scalar leaf of *obj* as a native str; booleans, None and
    empty strings are skipped."""
    if isinstance(obj, (text_type, binary_type)):
        # Empty strings are never sensitive, so nothing is yielded for them.
        if obj:
            yield to_native(obj, errors='surrogate_or_strict')
        return
    elif isinstance(obj, SEQUENCETYPE):
        for element in obj:
            for subelement in return_values(element):
                yield subelement
    elif isinstance(obj, Mapping):
        # Only mapping *values* are walked; keys are not treated as data.
        for element in obj.items():
            for subelement in return_values(element[1]):
                yield subelement
    elif isinstance(obj, (bool, NoneType)):
        # This must come before int because bools are also ints
        return
    elif isinstance(obj, NUMBERTYPES):
        yield to_native(obj, nonstring='simplerepr')
    else:
        raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
    """
    Helper function for :meth:`remove_values`.

    :arg value: The value to check for strings that need to be stripped
    :arg no_log_strings: set of strings which must be stripped out of any values
    :arg deferred_removals: List which holds information about nested
        containers that have to be iterated for removals.  It is passed into
        this function so that more entries can be added to it if value is
        a container type.  The format of each entry is a 2-tuple where the first
        element is the ``value`` parameter and the second value is a new
        container to copy the elements of ``value`` into once iterated.
    :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
        1. :class:`~datetime.datetime` objects which are changed into a string representation.
        2. objects which are in no_log_strings are replaced with a placeholder
           so that no sensitive data is leaked.
        If ``value`` is a container type, returns a new empty container.

    ``deferred_removals`` is added to as a side-effect of this function.

    .. warning:: It is up to the caller to make sure the order in which value
        is passed in is correct.  For instance, higher level containers need
        to be passed in before lower level containers.  For example, given
        ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
        dictionary for ``level1``, then the dict for ``level2``, and finally
        the list for ``level3``.
    """
    if isinstance(value, (text_type, binary_type)):
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, errors='surrogate_or_strict')
        # Exact match: replace wholesale with the placeholder.
        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        # Substring matches: mask just the sensitive portion(s).
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)
        # Convert back to the type (text/bytes) the caller handed us.
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value
    elif isinstance(value, Sequence):
        # Return an empty container of the same (or a compatible mutable)
        # type; the caller fills it when it drains deferred_removals.
        if isinstance(value, MutableSequence):
            new_value = type(value)()
        else:
            new_value = []  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Set):
        if isinstance(value, MutableSet):
            new_value = type(value)()
        else:
            new_value = set()  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Mapping):
        if isinstance(value, MutableMapping):
            new_value = type(value)()
        else:
            new_value = {}  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # Compare the stringified scalar against the no_log strings too, so
        # numeric passwords are also censored.
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        # datetimes are not json serializable; use ISO 8601 text instead.
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
    return value
def remove_values(value, no_log_strings):
    """Return a copy of *value* with all occurrences of the strings in
    *no_log_strings* censored.

    Containers are rebuilt iteratively (breadth-first via a work queue)
    rather than recursively, so deeply nested structures cannot exhaust the
    interpreter stack.
    """
    pending = deque()
    no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
    sanitized = _remove_values_conditions(value, no_log_strings, pending)
    while pending:
        old_data, new_data = pending.popleft()
        if isinstance(new_data, Mapping):
            for old_key, old_elem in old_data.items():
                new_data[old_key] = _remove_values_conditions(old_elem, no_log_strings, pending)
        else:
            for elem in old_data:
                scrubbed = _remove_values_conditions(elem, no_log_strings, pending)
                if isinstance(new_data, MutableSequence):
                    new_data.append(scrubbed)
                elif isinstance(new_data, MutableSet):
                    new_data.add(scrubbed)
                else:
                    raise TypeError('Unknown container type encountered when removing private values from output')
    return sanitized
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages '''
    # Currently filters:
    #   user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds
    #
    # Scans the string right-to-left.  Index variables:
    #   begin: start of a passwd containing string
    #   end: end of a passwd containing string
    #   sep: char between user and passwd
    #   prev_begin: where in the overall string to start a search for
    #       a passwd
    #   sep_search_end: where in the string to end a search for the sep
    data = to_native(data)
    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break
        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here.  Return the remaining data
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin
    output = ''.join(output)
    if no_log_values:
        # Also censor any explicitly registered no_log values.
        output = remove_values(output, no_log_values)
    return output
def bytes_to_human(size, isbits=False, unit=None):
    """Format *size* (bytes, or bits when *isbits*) as a human readable
    string such as '1.50 MB'.  When *unit* is given, force that suffix
    instead of auto-selecting the largest fitting one."""
    base = 'bits' if isbits else 'Bytes'
    suffix = ''
    # Walk the size ranges from largest to smallest; stop at the first one
    # that fits (auto mode) or whose first letter matches the forced unit.
    for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
        if unit is None:
            if size >= limit:
                break
        elif unit.upper() == suffix[0]:
            break
    if limit == 1:
        suffix = base
    else:
        suffix += base[0]
    return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
    ex:
      human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    :arg number: int, float or string such as '10MB'; the unit suffix is optional
    :kwarg default_unit: unit letter to assume when *number* carries none
    :kwarg isbits: when True the quantity is interpreted as bits, not bytes
    :returns: the size as an int
    :raises ValueError: if the number or its unit cannot be interpreted
    '''
    # Raw string avoids invalid-escape-sequence warnings for \s and \d.
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except ValueError:
        # Narrowed from a bare except: float() on a str only raises ValueError.
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
    unit = m.group(2)
    if unit is None:
        unit = default_unit
    if unit is None:
        # No unit given, returning raw number
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except KeyError:
        # Narrowed from a bare except: a missing suffix raises KeyError.
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
    return int(round(num * limit))
def is_executable(path):
    '''Return a truthy value when any execute bit is set on *path*.

    Limitations:

    * Does not account for FSACLs.
    * Only reports whether *an* execute bit is set somewhere; it does not
      tell you whether the current user can actually execute the file.
    '''
    # Bitwise-and the file's mode against the union of the three execute
    # bits (user/group/other); a nonzero result means at least one is set.
    mode = os.stat(path)[stat.ST_MODE]
    return mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module.  Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously.  It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        # Already read once this process; reuse the cached raw bytes.
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline
        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if PY3:
                    # sys.argv is text on py3; keep buffer as bytes for the
                    # common decode path below.
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                # py3: read raw bytes; decoding happens explicitly below.
                buffer = sys.stdin.buffer.read()
        _ANSIBLE_ARGS = buffer
    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)
    if PY2:
        params = json_dict_unicode_to_bytes(params)
    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
              '"failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    '''Return the value of the first environment variable named in *args*
    that is set; raise AnsibleFallbackNotFound when none of them are.'''
    for name in args:
        try:
            return os.environ[name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
    """Map short chattr attribute flag characters to their human readable
    names, skipping unknown flags."""
    return [FILE_ATTRIBUTES[attr] for attr in attributes if attr in FILE_ATTRIBUTES]
def get_flags_from_attributes(attributes):
    """Inverse of format_attributes: build the chattr flag string for the
    given human readable attribute names."""
    return ''.join(key for key, attr in FILE_ATTRIBUTES.items() if attr in attributes)
class AnsibleFallbackNotFound(Exception):
    """Raised by fallback callables (e.g. env_fallback) when no fallback
    value could be found for a parameter."""
    pass
class _SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Set):
return list(obj)
return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/* for examples

        :arg argument_spec: dict describing the module's accepted parameters
        :kwarg bypass_checks: skip all argument validation (dangerous)
        :kwarg no_log: suppress syslog logging of the invocation
        :kwarg check_invalid_arguments: fail on parameters not in the spec
        :kwarg mutually_exclusive: list of parameter-name groups that may not
            be used together
        :kwarg required_together: list of parameter-name groups that must be
            supplied together
        :kwarg required_one_of: list of groups where at least one parameter
            must be supplied
        :kwarg add_file_common_args: merge the common file options (mode,
            owner, group, selinux options, ...) into argument_spec
        :kwarg supports_check_mode: whether the module honors check mode
        :kwarg required_if: conditional requirements of the form
            (param, value, requirements)
        '''
        self._name = os.path.basename(__file__)  # initialize name until we can parse from options
        self.argument_spec = argument_spec
        self.supports_check_mode = supports_check_mode
        self.check_mode = False
        self.no_log = no_log
        self.cleanup_files = []
        self._debug = False
        self._diff = False
        self._socket_path = None
        self._verbosity = 0
        # May be used to set modifications to the environment for any
        # run_command invocation
        self.run_command_environ_update = {}
        self._warnings = []
        self._deprecations = []
        self.aliases = {}
        # Internal parameters injected by the ansible executor; always legal.
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
                              '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
                              '_ansible_socket']
        if add_file_common_args:
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in self.argument_spec:
                    self.argument_spec[k] = v
        self._load_params()
        self._set_fallbacks()
        # append to legal_inputs and then possibly check against them
        try:
            self.aliases = self._handle_aliases()
        except Exception:
            e = get_exception()
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            sys.exit(1)
        # Save parameter values that should never be logged
        self.no_log_values = set()
        # Use the argspec to determine which args are no_log
        for arg_name, arg_opts in self.argument_spec.items():
            if arg_opts.get('no_log', False):
                # Find the value for the no_log'd param
                no_log_object = self.params.get(arg_name, None)
                if no_log_object:
                    self.no_log_values.update(return_values(no_log_object))
            if arg_opts.get('removed_in_version') is not None and arg_name in self.params:
                self._deprecations.append({
                    'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
                    'version': arg_opts.get('removed_in_version')
                })
        # check the locale as set by the current environment, and reset to
        # a known valid (LANG=C) if it's an invalid/unavailable locale
        self._check_locale()
        self._check_arguments(check_invalid_arguments)
        # check exclusive early
        if not bypass_checks:
            self._check_mutually_exclusive(mutually_exclusive)
        self._set_defaults(pre=True)
        # Dispatch table mapping argspec 'type' names to checker/caster methods.
        self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
            'str': self._check_type_str,
            'list': self._check_type_list,
            'dict': self._check_type_dict,
            'bool': self._check_type_bool,
            'int': self._check_type_int,
            'float': self._check_type_float,
            'path': self._check_type_path,
            'raw': self._check_type_raw,
            'jsonarg': self._check_type_jsonarg,
            'json': self._check_type_jsonarg,
            'bytes': self._check_type_bytes,
            'bits': self._check_type_bits,
        }
        if not bypass_checks:
            self._check_required_arguments()
            self._check_argument_types()
            self._check_argument_values()
            self._check_required_together(required_together)
            self._check_required_one_of(required_one_of)
            self._check_required_if(required_if)
        self._set_defaults(pre=False)
        if not self.no_log:
            self._log_invocation()
        # finally, make sure we're in a sane working dir
        self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
    def load_file_common_arguments(self, params):
        '''
        many modules deal with files, this encapsulates common
        options that the file module accepts such that it is directly
        available to all modules and they can share code.

        :arg params: module params dict; recognized keys are path/dest, mode,
            owner, group, follow, attributes and the selinux options.
        :returns: dict of normalized file options, or ``{}`` when neither
            ``path`` nor ``dest`` is present.
        '''
        path = params.get('path', params.get('dest', None))
        if path is None:
            return {}
        else:
            path = os.path.expanduser(os.path.expandvars(path))
        b_path = to_bytes(path, errors='surrogate_or_strict')
        # if the path is a symlink, and we're following links, get
        # the target of the link instead for testing
        if params.get('follow', False) and os.path.islink(b_path):
            b_path = os.path.realpath(b_path)
            path = to_native(b_path)
        mode = params.get('mode', None)
        owner = params.get('owner', None)
        group = params.get('group', None)
        # selinux related options
        seuser = params.get('seuser', None)
        serole = params.get('serole', None)
        setype = params.get('setype', None)
        selevel = params.get('selevel', None)
        secontext = [seuser, serole, setype]
        if self.selinux_mls_enabled():
            secontext.append(selevel)
        default_secontext = self.selinux_default_context(path)
        for i in range(len(default_secontext)):
            # NOTE(review): 'i is not None' is always true for a loop index;
            # the intended guard was probably on default_secontext[i] --
            # confirm against upstream before changing.
            if i is not None and secontext[i] == '_default':
                secontext[i] = default_secontext[i]
        attributes = params.get('attributes', None)
        return dict(
            path=path, mode=mode, owner=owner, group=group,
            seuser=seuser, serole=serole, setype=setype,
            selevel=selevel, secontext=secontext, attributes=attributes,
        )
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
    def set_context_if_different(self, path, context, changed, diff=None):
        """Set the SELinux context of *path* to *context* if it differs.

        :arg context: desired context as a list of fields; ``None`` entries
            keep the current value for that field.
        :kwarg diff: optional dict populated with before/after secontext.
        :returns: updated changed flag (``True`` in check mode when a change
            would be made).
        """
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.
        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            # Special filesystems (e.g. NFS) keep their mount's context.
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        new_context[i] = cur_context[i]
        if cur_context != new_context:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context
            try:
                if self.check_mode:
                    return True
                rc = selinux.lsetfilecon(to_native(path),
                                         str(':'.join(new_context)))
            except OSError:
                e = get_exception()
                self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
    def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
        """Chmod *path* to *mode* if it differs.

        :arg mode: desired mode as an int, an octal string (e.g. '0644') or a
            symbolic string (e.g. 'u+rwx'); ``None`` leaves the mode alone.
        :kwarg diff: optional dict populated with before/after mode strings.
        :kwarg expand: expand ``~`` and environment variables in *path*.
        :returns: updated changed flag (``True`` in check mode when a change
            would be made).
        """
        b_path = to_bytes(path, errors='surrogate_then_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))
            path = to_text(b_path, errors='surrogate_then_strict')
        path_stat = os.lstat(b_path)
        if mode is None:
            return changed
        if not isinstance(mode, int):
            try:
                # Octal string form first ('0644').
                mode = int(mode, 8)
            except Exception:
                try:
                    # Fall back to symbolic form ('u+rwx,g-w').
                    mode = self._symbolic_mode_to_octal(path_stat, mode)
                except Exception:
                    e = get_exception()
                    self.fail_json(path=path,
                                   msg="mode must be in octal or symbolic form",
                                   details=str(e))
                if mode != stat.S_IMODE(mode):
                    # prevent mode from having extra info or being invalid long number
                    self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
        prev_mode = stat.S_IMODE(path_stat.st_mode)
        if prev_mode != mode:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['mode'] = '0%03o' % prev_mode
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['mode'] = '0%03o' % mode
            if self.check_mode:
                return True
            # FIXME: comparison against string above will cause this to be executed
            # every time
            try:
                if hasattr(os, 'lchmod'):
                    os.lchmod(b_path, mode)
                else:
                    if not os.path.islink(b_path):
                        os.chmod(b_path, mode)
                    else:
                        # Attempt to set the perms of the symlink but be
                        # careful not to change the perms of the underlying
                        # file while trying
                        underlying_stat = os.stat(b_path)
                        os.chmod(b_path, mode)
                        new_underlying_stat = os.stat(b_path)
                        if underlying_stat.st_mode != new_underlying_stat.st_mode:
                            os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
            except OSError:
                e = get_exception()
                if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                    pass
                elif e.errno in (errno.ENOENT, errno.ELOOP):  # Can't set mode on broken symbolic links
                    pass
                else:
                    raise e
            except Exception:
                e = get_exception()
                self.fail_json(path=path, msg='chmod failed', details=str(e))
            # Re-stat to report what actually happened (chmod may be a no-op
            # on some platforms/symlinks).
            path_stat = os.lstat(b_path)
            new_mode = stat.S_IMODE(path_stat.st_mode)
            if new_mode != prev_mode:
                changed = True
        return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except:
e = get_exception()
self.fail_json(path=path, msg='chattr failed', details=str(e))
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split(' ')[0:2]
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
    def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
        """Translate the permission letters in *perms* into an octal value
        for the given user class ('u', 'g' or 'o').

        Handles r/w/x, the conditional X (execute only for directories or
        files that already have some execute bit), s/t special bits, and the
        copy-from-class forms 'u'/'g'/'o' which mirror another class's
        current bits into this one.
        """
        prev_mode = stat.S_IMODE(path_stat.st_mode)
        is_directory = stat.S_ISDIR(path_stat.st_mode)
        has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
        apply_X_permission = is_directory or has_x_permissions
        # Permission bits constants documented at:
        # http://docs.python.org/2/library/stat.html#stat.S_ISUID
        if apply_X_permission:
            X_perms = {
                'u': {'X': stat.S_IXUSR},
                'g': {'X': stat.S_IXGRP},
                'o': {'X': stat.S_IXOTH},
            }
        else:
            X_perms = {
                'u': {'X': 0},
                'g': {'X': 0},
                'o': {'X': 0},
            }
        # Per-class translation tables; the 'u'/'g'/'o' entries shift the
        # source class's current bits into this class's bit positions.
        user_perms_to_modes = {
            'u': {
                'r': stat.S_IRUSR,
                'w': stat.S_IWUSR,
                'x': stat.S_IXUSR,
                's': stat.S_ISUID,
                't': 0,
                'u': prev_mode & stat.S_IRWXU,
                'g': (prev_mode & stat.S_IRWXG) << 3,
                'o': (prev_mode & stat.S_IRWXO) << 6,
            },
            'g': {
                'r': stat.S_IRGRP,
                'w': stat.S_IWGRP,
                'x': stat.S_IXGRP,
                's': stat.S_ISGID,
                't': 0,
                'u': (prev_mode & stat.S_IRWXU) >> 3,
                'g': prev_mode & stat.S_IRWXG,
                'o': (prev_mode & stat.S_IRWXO) << 3,
            },
            'o': {
                'r': stat.S_IROTH,
                'w': stat.S_IWOTH,
                'x': stat.S_IXOTH,
                's': 0,
                't': stat.S_ISVTX,
                'u': (prev_mode & stat.S_IRWXU) >> 6,
                'g': (prev_mode & stat.S_IRWXG) >> 3,
                'o': prev_mode & stat.S_IRWXO,
            }
        }
        # Insert X_perms into user_perms_to_modes
        for key, value in X_perms.items():
            user_perms_to_modes[key].update(value)

        def or_reduce(mode, perm):
            return mode | user_perms_to_modes[user][perm]
        # OR together the bit value of every permission letter requested.
        return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
    def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Thin alias: directories take the same treatment as any fs object.
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Thin alias: regular files take the same treatment as any fs object.
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.

        Adds uid/gid, owner/group names, mode, state (file/directory/link/
        hard/absent), size and (when SELinux is enabled) secontext.
        '''
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if os.path.exists(b_path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                # No passwd entry; report the numeric id as a string.
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            st = os.lstat(b_path)
            kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
            # secontext not yet supported
            if os.path.islink(b_path):
                kwargs['state'] = 'link'
            elif os.path.isdir(b_path):
                kwargs['state'] = 'directory'
            elif os.stat(b_path).st_nlink > 1:
                # More than one hard link to this inode.
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        else:
            kwargs['state'] = 'absent'
        return kwargs
    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)

        Falls back to the 'C' locale (and exports it for any child
        processes) when the configured locale is unavailable; fails the
        module on any other error.
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            os.environ['LANG'] = 'C'
            os.environ['LC_ALL'] = 'C'
            os.environ['LC_MESSAGES'] = 'C'
        except Exception:
            e = get_exception()
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self, spec=None):
    """Register parameter aliases from the argument spec.

    Extends self._legal_inputs with every canonical name and alias,
    copies alias values supplied by the user onto the canonical key,
    and returns a dict mapping alias -> canonical name.
    """
    # this uses exceptions as it happens before we can safely call fail_json
    aliases_results = {}  # alias:canon
    if spec is None:
        spec = self.argument_spec
    for (k, v) in spec.items():
        self._legal_inputs.append(k)
        aliases = v.get('aliases', None)
        default = v.get('default', None)
        required = v.get('required', False)
        if default is not None and required:
            # not alias specific but this is a good place to check this
            raise Exception("internal error: required and default are mutually exclusive for %s" % k)
        if aliases is None:
            continue
        if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
            raise Exception('internal error: aliases must be a list or tuple')
        for alias in aliases:
            self._legal_inputs.append(alias)
            aliases_results[alias] = k
            if alias in self.params:
                # user supplied the alias; mirror its value onto the canonical key
                self.params[k] = self.params[alias]
    return aliases_results
def _check_arguments(self, check_invalid_arguments):
    """Consume internal ``_ansible_*`` control parameters and validate the rest.

    Internal keys configure module behaviour (check mode, no_log, debug,
    diff, verbosity, ...) and are removed from self.params afterwards.
    When check_invalid_arguments is true, any remaining key not in
    self._legal_inputs triggers fail_json.
    """
    self._syslog_facility = 'LOG_USER'
    unsupported_parameters = set()
    # iterate over a copy since internal keys are deleted during the loop
    for (k, v) in list(self.params.items()):
        if k == '_ansible_check_mode' and v:
            self.check_mode = True
        elif k == '_ansible_no_log':
            self.no_log = self.boolean(v)
        elif k == '_ansible_debug':
            self._debug = self.boolean(v)
        elif k == '_ansible_diff':
            self._diff = self.boolean(v)
        elif k == '_ansible_verbosity':
            self._verbosity = v
        elif k == '_ansible_selinux_special_fs':
            self._selinux_special_fs = v
        elif k == '_ansible_syslog_facility':
            self._syslog_facility = v
        elif k == '_ansible_version':
            self.ansible_version = v
        elif k == '_ansible_module_name':
            self._name = v
        elif k == '_ansible_socket':
            self._socket_path = v
        elif check_invalid_arguments and k not in self._legal_inputs:
            unsupported_parameters.add(k)
        # clean up internal params:
        if k.startswith('_ansible_'):
            del self.params[k]
    if unsupported_parameters:
        self.fail_json(msg="Unsupported parameters for (%s) module: %s. Supported parameters include: %s" % (self._name,
                                                                                                             ','.join(sorted(list(unsupported_parameters))),
                                                                                                             ','.join(sorted(self.argument_spec.keys()))))
    if self.check_mode and not self.supports_check_mode:
        self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field]) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
    ''' ensure that parameters which conditionally required are present '''
    if spec is None:
        return
    # each entry is (key, value, requirements) or (key, value, requirements, is_one_of)
    for sp in spec:
        missing = []
        max_missing_count = 0
        is_one_of = False
        if len(sp) == 4:
            key, val, requirements, is_one_of = sp
        else:
            key, val, requirements = sp
        # is_one_of is True at least one requirement should be
        # present, else all requirements should be present.
        if is_one_of:
            max_missing_count = len(requirements)
        if key in self.params and self.params[key] == val:
            for check in requirements:
                count = self._count_terms((check,))
                if count == 0:
                    missing.append(check)
        # fail when something is missing AND (all-required mode, or
        # one-of mode with every requirement missing)
        if len(missing) and len(missing) >= max_missing_count:
            self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self, spec=None, param=None):
    ''' ensure all arguments have the requested values, and there are no stray arguments '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        choices = v.get('choices', None)
        if choices is None:
            continue
        if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
            if k in param:
                if param[k] not in choices:
                    # PyYaml converts certain strings to bools.  If we can unambiguously convert back, do so before checking
                    # the value.  If we can't figure this out, module author is responsible.
                    lowered_choices = None
                    if param[k] == 'False':
                        lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_FALSE.intersection(choices)
                        if len(overlap) == 1:
                            # Extract from a set
                            (param[k],) = overlap
                    if param[k] == 'True':
                        if lowered_choices is None:
                            lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_TRUE.intersection(choices)
                        if len(overlap) == 1:
                            (param[k],) = overlap
                    # re-check after the possible bool-string normalization above
                    if param[k] not in choices:
                        choices_str = ",".join([to_native(c) for c in choices])
                        msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
                        self.fail_json(msg=msg)
        else:
            self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, value, locals=None, include_exceptions=False):
    """Evaluate *value* as a Python literal, rejecting method calls and imports.

    Non-strings are returned unchanged.  With include_exceptions=True the
    return is a (result, exception) tuple; otherwise just the result.
    On any evaluation error the original value is returned.
    """
    # do not allow method calls to modules
    if not isinstance(value, string_types):
        # already templated to a datavaluestructure, perhaps?
        if include_exceptions:
            return (value, None)
        return value
    if re.search(r'\w\.\w+\(', value):
        if include_exceptions:
            return (value, None)
        return value
    # do not allow imports
    if re.search(r'import \w+', value):
        if include_exceptions:
            return (value, None)
        return value
    try:
        # literal_eval only accepts Python literals, so this is not eval()
        result = literal_eval(value)
        if include_exceptions:
            return (result, None)
        else:
            return result
    except Exception:
        e = get_exception()
        if include_exceptions:
            return (value, e)
        return value
def _check_type_str(self, value):
    # Coerce *value* to a string; strings pass through unchanged.
    if isinstance(value, string_types):
        return value
    # Note: This could throw a unicode error if value's __str__() method
    # returns non-ascii.  Have to port utils.to_bytes() if that happens
    return str(value)
def _check_type_list(self, value):
    # Coerce *value* to a list: strings are comma-split, numbers become
    # a single-element list of their string form.
    if isinstance(value, list):
        return value
    if isinstance(value, string_types):
        return value.split(",")
    elif isinstance(value, int) or isinstance(value, float):
        return [str(value)]
    raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
    """Coerce *value* to a dict.

    Strings starting with '{' are parsed as JSON (falling back to
    safe_eval); otherwise a 'k1=v1 k2=v2' / 'k1=v1,k2=v2' string is
    parsed with quote and backslash-escape support.
    """
    if isinstance(value, dict):
        return value
    if isinstance(value, string_types):
        if value.startswith("{"):
            try:
                return json.loads(value)
            except:
                # not valid JSON; try a Python-literal dict via safe_eval
                (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                if exc is not None:
                    raise TypeError('unable to evaluate string as dictionary')
                return result
        elif '=' in value:
            # hand-rolled tokenizer: split on spaces/commas outside quotes,
            # honouring backslash escapes
            fields = []
            field_buffer = []
            in_quote = False
            in_escape = False
            for c in value.strip():
                if in_escape:
                    # previous char was a backslash: take this char literally
                    field_buffer.append(c)
                    in_escape = False
                elif c == '\\':
                    in_escape = True
                elif not in_quote and c in ('\'', '"'):
                    # remember which quote char opened the quoted section
                    in_quote = c
                elif in_quote and in_quote == c:
                    in_quote = False
                elif not in_quote and c in (',', ' '):
                    # field separator outside quotes: flush the buffer
                    field = ''.join(field_buffer)
                    if field:
                        fields.append(field)
                    field_buffer = []
                else:
                    field_buffer.append(c)
            # flush the trailing field
            field = ''.join(field_buffer)
            if field:
                fields.append(field)
            return dict(x.split("=", 1) for x in fields)
        else:
            raise TypeError("dictionary requested, could not parse JSON or key=value")
    raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
    # Coerce *value* to a bool; strings and ints go through self.boolean().
    if isinstance(value, bool):
        return value
    if isinstance(value, string_types) or isinstance(value, int):
        return self.boolean(value)
    raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
    # Coerce *value* to an int; int() may raise ValueError, which the
    # caller (_check_argument_types) treats like a TypeError.
    if isinstance(value, int):
        return value
    if isinstance(value, string_types):
        return int(value)
    raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
    # Coerce *value* to a float from str/bytes/int representations.
    if isinstance(value, float):
        return value
    if isinstance(value, (binary_type, text_type, int)):
        return float(value)
    raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
    # Return a jsonified string.  Sometimes the controller turns a json
    # string into a dict/list so transform it back into json here
    if isinstance(value, (text_type, binary_type)):
        return value.strip()
    else:
        if isinstance(value, (list, tuple, dict)):
            return self.jsonify(value)
    raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
    # 'raw' type performs no conversion at all.
    return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _check_argument_types(self, spec=None, param=None):
    ''' ensure all arguments have the requested type '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        wanted = v.get('type', None)
        if k not in param:
            continue
        if wanted is None:
            # Mostly we want to default to str.
            # For values set to None explicitly, return None instead as
            # that allows a user to unset a parameter
            if param[k] is None:
                continue
            wanted = 'str'
        value = param[k]
        if value is None:
            continue
        try:
            # dispatch table maps type name -> bound _check_type_* method
            type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
        except KeyError:
            self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
        try:
            param[k] = type_checker(value)
        except (TypeError, ValueError):
            e = get_exception()
            self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e))
        # deal with sub options to create sub spec
        spec = None
        if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
            spec = v.get('options', None)
        if spec:
            # recurse: validate the sub-spec against the converted sub-dict
            self._check_required_arguments(spec, param[k])
            self._check_argument_types(spec, param[k])
            self._check_argument_values(spec, param[k])
def _set_defaults(self, pre=True):
for (k, v) in self.argument_spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
    """For unset params, invoke the spec's fallback strategy to obtain a value.

    A spec entry may carry fallback=(strategy, args..., kwargs_dict);
    AnsibleFallbackNotFound from the strategy means "no value" and is
    silently skipped.
    """
    for (k, v) in self.argument_spec.items():
        fallback = v.get('fallback', (None,))
        fallback_strategy = fallback[0]
        fallback_args = []
        fallback_kwargs = {}
        if k not in self.params and fallback_strategy is not None:
            # remaining tuple items: a dict becomes kwargs, anything else args
            for item in fallback[1:]:
                if isinstance(item, dict):
                    fallback_kwargs = item
                else:
                    fallback_args = item
            try:
                self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
            except AnsibleFallbackNotFound:
                continue
def _load_params(self):
    ''' read the input and set the params attribute.

    This method is for backwards compatibility.  The guts of the function
    were moved out in 2.1 so that custom modules could read the parameters.
    '''
    # debug overrides to read args from file or cmdline
    # delegates to the module-level _load_params() helper of the same name
    self.params = _load_params()
def _log_to_syslog(self, msg):
    # Emit *msg* to syslog at INFO level, tagged 'ansible-<module name>'.
    # Silently does nothing when the syslog module is unavailable.
    if HAS_SYSLOG:
        module = 'ansible-%s' % self._name
        # fall back to LOG_USER if the configured facility name is unknown
        facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
        syslog.openlog(str(module), 0, facility)
        syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
    # Log *msg* with a '[debug]' prefix, but only when debug mode is on.
    if self._debug:
        self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
    """Log *msg* to systemd journal if available, otherwise to syslog.

    Honours self.no_log (logs nothing when set) and strips no_log values
    from the message.  log_args become extra journal fields.
    """
    if not self.no_log:
        if log_args is None:
            log_args = dict()
        module = 'ansible-%s' % self._name
        if isinstance(module, binary_type):
            module = module.decode('utf-8', 'replace')
        # 6655 - allow for accented characters
        if not isinstance(msg, (binary_type, text_type)):
            raise TypeError("msg should be a string (got %s)" % type(msg))
        # We want journal to always take text type
        # syslog takes bytes on py2, text type on py3
        if isinstance(msg, binary_type):
            journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
        else:
            # TODO: surrogateescape is a danger here on Py3
            journal_msg = remove_values(msg, self.no_log_values)
        if PY3:
            syslog_msg = journal_msg
        else:
            syslog_msg = journal_msg.encode('utf-8', 'replace')
        if has_journal:
            journal_args = [("MODULE", os.path.basename(__file__))]
            for arg in log_args:
                journal_args.append((arg.upper(), str(log_args[arg])))
            try:
                journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
            except IOError:
                # fall back to syslog since logging to journal failed
                self._log_to_syslog(syslog_msg)
        else:
            self._log_to_syslog(syslog_msg)
def _log_invocation(self):
    ''' log that ansible ran the module '''
    # TODO: generalize a separate log function and make log_invocation use it
    # Sanitize possible password argument when logging.
    log_args = dict()
    for param in self.params:
        # resolve aliases so no_log declared on the canonical name applies
        canon = self.aliases.get(param, param)
        arg_opts = self.argument_spec.get(canon, {})
        no_log = arg_opts.get('no_log', False)
        if self.boolean(no_log):
            log_args[param] = 'NOT_LOGGING_PARAMETER'
        # try to capture all passwords/passphrase named fields missed by no_log
        elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
            # skip boolean and enums as they are about 'password' state
            log_args[param] = 'NOT_LOGGING_PASSWORD'
            self.warn('Module did not set no_log for %s' % param)
        else:
            param_val = self.params[param]
            if not isinstance(param_val, (text_type, binary_type)):
                param_val = str(param_val)
            elif isinstance(param_val, text_type):
                # py2 path: encode text before sanitizing
                param_val = param_val.encode('utf-8')
            log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
    msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
    if msg:
        msg = 'Invoked with %s' % ' '.join(msg)
    else:
        msg = 'Invoked'
    self.log(msg, log_args=log_args)
def _set_cwd(self):
    # Return a usable working directory, chdir'ing to $HOME or the temp
    # dir when the current one is inaccessible (e.g. under sudo).
    try:
        cwd = os.getcwd()
        if not os.access(cwd, os.F_OK | os.R_OK):
            raise Exception()
        return cwd
    except:
        # we don't have access to the cwd, probably because of sudo.
        # Try and move to a neutral location to prevent errors
        for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
            try:
                if os.access(cwd, os.F_OK | os.R_OK):
                    os.chdir(cwd)
                    return cwd
            except:
                # deliberately best-effort: try the next candidate
                pass
    # we won't error here, as it may *not* be a problem,
    # and we don't want to break modules unnecessarily
    return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
    '''
    find system executable in PATH.
    Optional arguments:
       - required:  if executable is not found and required is true, fail_json
       - opt_dirs:  optional list of directories to search in addition to PATH
    if found return full path; otherwise return None
    '''
    # BUGFIX: the default used to be a mutable list literal ([]); use None
    # to avoid the shared-mutable-default pitfall.  Interface-compatible:
    # callers passing a list see identical behaviour.
    if opt_dirs is None:
        opt_dirs = []
    sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    paths = []
    for d in opt_dirs:
        if d is not None and os.path.exists(d):
            paths.append(d)
    paths += os.environ.get('PATH', '').split(os.pathsep)
    bin_path = None
    # mangle PATH to include /sbin dirs
    for p in sbin_paths:
        if p not in paths and os.path.exists(p):
            paths.append(p)
    for d in paths:
        if not d:
            continue
        path = os.path.join(d, arg)
        # must exist, not be a directory, and carry an execute bit
        if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
            bin_path = path
            break
    if required and bin_path is None:
        self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
    return bin_path
def boolean(self, arg):
    ''' return a bool for the arg '''
    # None passes through unchanged (caller distinguishes unset from False)
    if arg is None:
        return arg
    try:
        # delegates to the module-level boolean() converter
        return boolean(arg)
    except TypeError as e:
        self.fail_json(msg=to_native(e))
def jsonify(self, data):
    """Serialize *data* to JSON, trying utf-8 then latin-1 for undecodable bytes.

    NOTE(review): the encoding= keyword to json.dumps only exists on
    Python 2 / old simplejson; the TypeError fallback handles newer libs.
    """
    for encoding in ("utf-8", "latin-1"):
        try:
            return json.dumps(data, encoding=encoding, cls=_SetEncoder)
        # Old systems using old simplejson module does not support encoding keyword.
        except TypeError:
            try:
                new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
            except UnicodeDecodeError:
                continue
            return json.dumps(new_data, cls=_SetEncoder)
        except UnicodeDecodeError:
            continue
    self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
    # Deserialize a JSON document string and return the resulting object.
    return json.loads(data)
def add_cleanup_file(self, path):
    """Register *path* for removal at module exit; duplicates are ignored."""
    if path in self.cleanup_files:
        return
    self.cleanup_files.append(path)
def do_cleanup_files(self):
    """Remove every file previously registered via add_cleanup_file()."""
    for registered in self.cleanup_files:
        self.cleanup(registered)
def _return_formatted(self, kwargs):
    """Decorate the result dict and print it as JSON on stdout.

    Adds path/invocation info, folds in accumulated warnings and
    deprecations, and strips no_log values before printing.
    """
    self.add_path_info(kwargs)
    if 'invocation' not in kwargs:
        kwargs['invocation'] = {'module_args': self.params}
    if 'warnings' in kwargs:
        if isinstance(kwargs['warnings'], list):
            for w in kwargs['warnings']:
                self.warn(w)
        else:
            self.warn(kwargs['warnings'])
    # replace caller-supplied warnings with the full accumulated list
    if self._warnings:
        kwargs['warnings'] = self._warnings
    if 'deprecations' in kwargs:
        if isinstance(kwargs['deprecations'], list):
            for d in kwargs['deprecations']:
                # a (msg, version) pair or a bare message
                if isinstance(d, SEQUENCETYPE) and len(d) == 2:
                    self.deprecate(d[0], version=d[1])
                else:
                    self.deprecate(d)
        else:
            self.deprecate(kwargs['deprecations'])
    if self._deprecations:
        kwargs['deprecations'] = self._deprecations
    # scrub any registered secret values from the entire result structure
    kwargs = remove_values(kwargs, self.no_log_values)
    print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
    ''' return from the module, without error '''
    # remove registered temp files, emit the JSON result, exit 0
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(0)
def fail_json(self, **kwargs):
    ''' return from the module, with an error message '''
    # NOTE(review): assert is stripped under python -O; acceptable here as
    # it only guards against module-author errors, not user input.
    assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
    kwargs['failed'] = True
    # add traceback if debug or high verbosity and it is missing
    # Note: badly named as exception, it is really always been 'traceback'
    if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
        kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(1)
def fail_on_missing_params(self, required_params=None):
    ''' This is for checking for required params when we can not check via argspec because we
    need more information than is simply given in the argspec.
    '''
    # NOTE: unlike the argspec 'required' check, a falsey value (None,
    # '', 0, ...) counts as missing here, per self.params.get() semantics.
    if not required_params:
        return
    absent = [name for name in required_params if not self.params.get(name)]
    if absent:
        self.fail_json(msg="missing required arguments: %s" % ','.join(absent))
def digest_from_file(self, filename, algorithm):
    ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present.

    :arg filename: path of the file to hash
    :arg algorithm: name of a hash algorithm, or (legacy) a hash object
        exposing update()/hexdigest()
    '''
    if not os.path.exists(filename):
        return None
    if os.path.isdir(filename):
        self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
    # preserve old behaviour where the third parameter was a hash algorithm object
    if hasattr(algorithm, 'hexdigest'):
        digest_method = algorithm
    else:
        try:
            digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
        except KeyError:
            self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
                           (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
    blocksize = 64 * 1024
    # BUGFIX: use a context manager so the handle is closed even if read()
    # raises (the old open/close pair leaked the descriptor on error).
    with open(os.path.realpath(filename), 'rb') as infile:
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)
            block = infile.read(blocksize)
    return digest_method.hexdigest()
def md5(self, filename):
    ''' Return MD5 hex digest of local file using digest_from_file().

    Do not use this function unless you have no other choice for:
        1) Optional backwards compatibility
        2) Compatibility with a third party protocol

    This function will not work on systems complying with FIPS-140-2.

    Most uses of this function can use the module.sha1 function instead.
    '''
    if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
        raise ValueError('MD5 not available.  Possibly running in FIPS mode')
    return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
    ''' Return SHA1 hex digest of local file using digest_from_file(), or None if the file is absent. '''
    return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
    ''' Return SHA-256 hex digest of local file using digest_from_file(), or None if the file is absent. '''
    return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
    '''make a date-marked backup of the specified file, return True or False on success or failure'''
    # NOTE(review): despite the docstring, this returns the backup path
    # ('' when the source does not exist), not True/False.
    backupdest = ''
    if os.path.exists(fn):
        # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
        ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
        backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
        try:
            # copy2 preserves mtime and permission bits
            shutil.copy2(fn, backupdest)
        except (shutil.Error, IOError):
            e = get_exception()
            self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
    return backupdest
def cleanup(self, tmpfile):
    # Best-effort removal of *tmpfile*; failure is reported on stderr
    # rather than aborting the module.
    if os.path.exists(tmpfile):
        try:
            os.unlink(tmpfile)
        except OSError:
            e = get_exception()
            sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
    '''atomically move src to dest, copying attributes from dest, returns true on success
    it uses os.rename to ensure this as it is an atomic operation, rest of the function is
    to work around limitations, corner cases and ensure selinux context is saved if possible'''
    context = None
    dest_stat = None
    b_src = to_bytes(src, errors='surrogate_or_strict')
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if os.path.exists(b_dest):
        try:
            dest_stat = os.stat(b_dest)
            # copy mode and ownership
            os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
            os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
            # try to copy flags if possible
            if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
                try:
                    os.chflags(b_src, dest_stat.st_flags)
                except OSError:
                    e = get_exception()
                    # ignore "flags not supported" on this filesystem
                    for err in 'EOPNOTSUPP', 'ENOTSUP':
                        if hasattr(errno, err) and e.errno == getattr(errno, err):
                            break
                    else:
                        raise
        except OSError:
            e = get_exception()
            # EPERM just means we may not chown/chmod; proceed anyway
            if e.errno != errno.EPERM:
                raise
        if self.selinux_enabled():
            context = self.selinux_context(dest)
    else:
        if self.selinux_enabled():
            context = self.selinux_default_context(dest)
    creating = not os.path.exists(b_dest)
    try:
        # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
        os.rename(b_src, b_dest)
    except (IOError, OSError):
        e = get_exception()
        if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
            # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
            # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
            self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
        else:
            b_dest_dir = os.path.dirname(b_dest)
            # Use bytes here.  In the shippable CI, this fails with
            # a UnicodeError with surrogateescape'd strings for an unknown
            # reason (doesn't happen in a local Ubuntu16.04 VM)
            native_dest_dir = b_dest_dir
            native_suffix = os.path.basename(b_dest)
            native_prefix = b('.ansible_tmp')
            error_msg = None
            tmp_dest_name = None
            try:
                # stage into a temp file in the destination directory so the
                # final rename stays on one filesystem (and thus atomic)
                tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
            except (OSError, IOError):
                e = get_exception()
                error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e)
            except TypeError:
                # We expect that this is happening because python3.4.x and
                # below can't handle byte strings in mkstemp().  Traceback
                # would end in something like:
                #     file = _os.path.join(dir, pre + name + suf)
                #     TypeError: can't concat bytes to str
                error_msg = ('Failed creating temp file for atomic move.  This usually happens when using Python3 less than Python3.5. '
                             'Please use Python2.x or Python3.5 or greater.')
            finally:
                if error_msg:
                    if unsafe_writes:
                        self._unsafe_writes(b_src, b_dest)
                    else:
                        self.fail_json(msg=error_msg, exception=traceback.format_exc())
            if tmp_dest_name:
                b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
                try:
                    try:
                        # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
                        os.close(tmp_dest_fd)
                        # leaves tmp file behind when sudo and not root
                        try:
                            shutil.move(b_src, b_tmp_dest_name)
                        except OSError:
                            # cleanup will happen by 'rm' of tempdir
                            # copy2 will preserve some metadata
                            shutil.copy2(b_src, b_tmp_dest_name)
                        if self.selinux_enabled():
                            self.set_context_if_different(
                                b_tmp_dest_name, context, False)
                        try:
                            tmp_stat = os.stat(b_tmp_dest_name)
                            if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                                os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
                        except OSError:
                            e = get_exception()
                            if e.errno != errno.EPERM:
                                raise
                        try:
                            os.rename(b_tmp_dest_name, b_dest)
                        except (shutil.Error, OSError, IOError):
                            e = get_exception()
                            if unsafe_writes and e.errno == errno.EBUSY:
                                self._unsafe_writes(b_tmp_dest_name, b_dest)
                            else:
                                self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
                    except (shutil.Error, OSError, IOError):
                        e = get_exception()
                        self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
                finally:
                    self.cleanup(b_tmp_dest_name)
    if creating:
        # make sure the file has the correct permissions
        # based on the current value of umask
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(b_dest, DEFAULT_PERM & ~umask)
        try:
            os.chown(b_dest, os.geteuid(), os.getegid())
        except OSError:
            # We're okay with trying our best here.  If the user is not
            # root (or old Unices) they won't be able to chown.
            pass
    if self.selinux_enabled():
        # rename might not preserve context
        self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
    # Read up to 9000 bytes from *file_descriptor* if select() marked it
    # readable; on EOF (empty read) drop it from the watched pipe list.
    data = b('')
    if file_descriptor in rfds:
        data = os.read(file_descriptor.fileno(), 9000)
        if data == b(''):
            rpipes.remove(file_descriptor)
    return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
                use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
    '''
    Execute a command, returns rc, stdout, and stderr.

    :arg args: is the command to run
        * If args is a list, the command will be run with shell=False.
        * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
        * If args is a string and use_unsafe_shell=True it runs with shell=True.
    :kw check_rc: Whether to call fail_json in case of non zero RC.
        Default False
    :kw close_fds: See documentation for subprocess.Popen(). Default True
    :kw executable: See documentation for subprocess.Popen(). Default None
    :kw data: If given, information to write to the stdin of the command
    :kw binary_data: If False, append a newline to the data. Default False
    :kw path_prefix: If given, additional path to find the command in.
        This adds to the PATH environment vairable so helper commands in
        the same directory can also be found
    :kw cwd: If given, working directory to run the command inside
    :kw use_unsafe_shell: See `args` parameter. Default False
    :kw prompt_regex: Regex string (not a compiled regex) which can be
        used to detect prompts in the stdout which would otherwise cause
        the execution to hang (especially if no input data is specified)
    :kw environ_update: dictionary to *update* os.environ with
    :kw umask: Umask to be used when running the command. Default None
    :kw encoding: Since we return native strings, on python3 we need to
        know the encoding to use to transform from bytes to text. If you
        want to always get bytes back, use encoding=None. The default is
        "utf-8". This does not affect transformation of strings given as
        args.
    :kw errors: Since we return native strings, on python3 we need to
        transform stdout and stderr from bytes to text. If the bytes are
        undecodable in the ``encoding`` specified, then use this error
        handler to deal with them. The default is ``surrogate_or_strict``
        which means that the bytes will be decoded using the
        surrogateescape error handler if available (available on all
        python3 versions we support) otherwise a UnicodeError traceback
        will be raised. This does not affect transformations of strings
        given as args.
    :returns: A 3-tuple of return code (integer), stdout (native string),
        and stderr (native string). On python2, stdout and stderr are both
        byte strings. On python3, stdout and stderr are text strings converted
        according to the encoding and errors parameters. If you want byte
        strings on python3, use encoding=None to turn decoding to text off.
    '''
    # normalise *args*: list stays a list (shell=False) unless unsafe shell
    # stringifies it; a plain string is shlex-split for shell=False use
    if isinstance(args, list):
        if use_unsafe_shell:
            args = " ".join([shlex_quote(x) for x in args])
            shell = True
    elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
        shell = True
    elif isinstance(args, (binary_type, text_type)):
        if not use_unsafe_shell:
            # On python2.6 and below, shlex has problems with text type
            # On python3, shlex needs a text type.
            if PY2:
                args = to_bytes(args, errors='surrogate_or_strict')
            elif PY3:
                args = to_text(args, errors='surrogateescape')
            args = shlex.split(args)
    else:
        msg = "Argument 'args' to run_command must be list or string"
        self.fail_json(rc=257, cmd=args, msg=msg)
    # shell is re-derived here: unsafe-shell commands prefer an explicit
    # '<shell> -c <cmd>' invocation over subprocess's shell=True
    shell = False
    if use_unsafe_shell:
        if executable is None:
            executable = os.environ.get('SHELL')
        if executable:
            args = [executable, '-c', args]
        else:
            shell = True
    prompt_re = None
    if prompt_regex:
        if isinstance(prompt_regex, text_type):
            if PY3:
                prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
            elif PY2:
                prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
        try:
            prompt_re = re.compile(prompt_regex, re.MULTILINE)
        except re.error:
            self.fail_json(msg="invalid prompt regular expression given to run_command")
    # expand things like $HOME and ~
    if not shell:
        args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
    rc = 0
    msg = None
    st_in = None
    # Manipulate the environ we'll send to the new process
    old_env_vals = {}
    # We can set this from both an attribute and per call
    for key, val in self.run_command_environ_update.items():
        old_env_vals[key] = os.environ.get(key, None)
        os.environ[key] = val
    if environ_update:
        for key, val in environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
    if path_prefix:
        old_env_vals['PATH'] = os.environ['PATH']
        os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
    # If using test-module and explode, the remote lib path will resemble ...
    #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
    # If using ansible or ansible-playbook with a remote system ...
    #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
    # Clean out python paths set by ansiballz
    if 'PYTHONPATH' in os.environ:
        pypaths = os.environ['PYTHONPATH'].split(':')
        pypaths = [x for x in pypaths
                   if not x.endswith('/ansible_modlib.zip') and
                   not x.endswith('/debug_dir')]
        os.environ['PYTHONPATH'] = ':'.join(pypaths)
        if not os.environ['PYTHONPATH']:
            del os.environ['PYTHONPATH']
    # create a printable version of the command for use
    # in reporting later, which strips out things like
    # passwords from the args list
    to_clean_args = args
    if PY2:
        if isinstance(args, text_type):
            to_clean_args = to_bytes(args)
    else:
        if isinstance(args, binary_type):
            to_clean_args = to_text(args)
    if isinstance(args, (text_type, binary_type)):
        to_clean_args = shlex.split(to_clean_args)
    clean_args = []
    is_passwd = False
    for arg in (to_native(a) for a in to_clean_args):
        if is_passwd:
            # previous arg was a bare password flag; mask this value
            is_passwd = False
            clean_args.append('********')
            continue
        if PASSWD_ARG_RE.match(arg):
            sep_idx = arg.find('=')
            if sep_idx > -1:
                clean_args.append('%s=********' % arg[:sep_idx])
                continue
            else:
                is_passwd = True
        arg = heuristic_log_sanitize(arg, self.no_log_values)
        clean_args.append(arg)
    clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)
    if data:
        st_in = subprocess.PIPE
    kwargs = dict(
        executable=executable,
        shell=shell,
        close_fds=close_fds,
        stdin=st_in,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # store the pwd
    prev_dir = os.getcwd()
    # make sure we're in the right working directory
    if cwd and os.path.isdir(cwd):
        cwd = os.path.abspath(os.path.expanduser(cwd))
        kwargs['cwd'] = cwd
        try:
            os.chdir(cwd)
        except (OSError, IOError):
            e = get_exception()
            self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
    old_umask = None
    if umask:
        old_umask = os.umask(umask)
    try:
        if self._debug:
            self.log('Executing: ' + clean_args)
        cmd = subprocess.Popen(args, **kwargs)
        # the communication logic here is essentially taken from that
        # of the _communicate() function in ssh.py
        stdout = b('')
        stderr = b('')
        rpipes = [cmd.stdout, cmd.stderr]
        if data:
            if not binary_data:
                data += '\n'
            if isinstance(data, text_type):
                data = to_bytes(data)
            cmd.stdin.write(data)
            cmd.stdin.close()
        while True:
            rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
            stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
            stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
            # if we're checking for prompts, do it now
            if prompt_re:
                if prompt_re.search(stdout) and not data:
                    if encoding:
                        stdout = to_native(stdout, encoding=encoding, errors=errors)
                    else:
                        stdout = stdout
                    return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfds) and cmd.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually cmd.poll() is always None here if rpipes is empty
            elif not rpipes and cmd.poll() is None:
                cmd.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break
        cmd.stdout.close()
        cmd.stderr.close()
        rc = cmd.returncode
    except (OSError, IOError):
        e = get_exception()
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
        self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
    except Exception:
        e = get_exception()
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
        self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
    # Restore env settings
    for key, val in old_env_vals.items():
        if val is None:
            del os.environ[key]
        else:
            os.environ[key] = val
    if old_umask:
        os.umask(old_umask)
    if rc != 0 and check_rc:
        msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
        self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
    # reset the pwd
    os.chdir(prev_dir)
    if encoding is not None:
        return (rc, to_native(stdout, encoding=encoding, errors=errors),
                to_native(stderr, encoding=encoding, errors=errors))
    return (rc, stdout, stderr)
def append_to_file(self, filename, str):
    """Append the text *str* to *filename*.

    The filename has environment variables and '~' expanded first.
    NOTE: the parameter name `str` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    filename = os.path.expandvars(os.path.expanduser(filename))
    # Use a context manager so the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open(filename, 'a') as fh:
        fh.write(str)
def bytes_to_human(self, size):
    """Delegate to the module-level bytes_to_human() helper (defined elsewhere in this file)."""
    return bytes_to_human(size)

# for backwards compatibility: old name for the same method
pretty_bytes = bytes_to_human

def human_to_bytes(self, number, isbits=False):
    """Delegate to the module-level human_to_bytes() helper (defined elsewhere in this file)."""
    return human_to_bytes(number, isbits)

#
# Backwards compat
#

# In 2.0, moved from inside the module to the toplevel;
# re-exported here as a class attribute so old callers keep working.
is_executable = is_executable
def get_module_path():
    """Return the directory containing this module, with symlinks resolved."""
    resolved = os.path.realpath(__file__)
    return os.path.dirname(resolved)
|
sonaht/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 104,645
|
[
"VisIt"
] |
3a63d4c8ead1cbcd32619c9c1622066635dc8ec3d4d106096747fb706db89321
|
from time import time, ctime
import numpy as np
import pickle
from math import pi
from ase.units import Hartree
from ase.io import write
from gpaw.blacs import BlacsGrid, Redistributor
from gpaw.io.tar import Writer, Reader
from gpaw.mpi import world, size, rank, serial_comm
from gpaw.utilities import devnull
from gpaw.utilities.blas import gemmdot, gemm, gemv
from gpaw.utilities.memory import maxrss
from gpaw.response.base import BASECHI
from gpaw.response.parallel import parallel_partition, gatherv
from gpaw.response.kernel import calculate_Kc, calculate_Kc_q
from gpaw.response.df0 import DF
class BSE(BASECHI):
"""This class defines Bethe-Salpeter equations."""
def __init__(self,
             calc=None,
             nbands=None,
             nc=None,
             nv=None,
             w=None,
             q=None,
             eshift=None,
             ecut=10.,
             eta=0.2,
             gw_skn=None,        # GW QP energies in Hartree
             rpad=None,
             vcut=None,          # Coulomb cutoff only 2D works
             ftol=1e-5,
             txt=None,
             optical_limit=None,
             integrate_coulomb=None,
             print_coulomb=False,
             coupling=False,     # False : use Tamm-Dancoff Approx
             mode='BSE',         # BSE, TDHF or RPA
             kernel_file=None,   # e.g. 'W_qGG'
             qsymm=True):
    """Set up a Bethe-Salpeter calculation on top of a BASECHI response object.

    Most options are forwarded unchanged to BASECHI.__init__; the rest
    configure bands, Coulomb integration, and the screening kernel cache.
    """
    BASECHI.__init__(self, calc=calc, nbands=nbands, w=w, q=q,
                     eshift=eshift, ecut=ecut, eta=eta, rpad=rpad,
                     ftol=ftol, txt=txt, optical_limit=optical_limit)

    # BUGFIX: the original `assert mode is 'RPA' or 'TDHF' or 'BSE'` was a
    # tautology ('TDHF' is always truthy), so invalid modes slipped through.
    assert mode in ('RPA', 'TDHF', 'BSE')

    self.epsilon_w = None
    self.coupling = coupling
    self.vcut = vcut
    self.nc = nc  # conduction band index
    self.nv = nv  # valence band index
    self.gw_skn = gw_skn
    self.mode = mode
    self.integrate_coulomb = integrate_coulomb
    self.print_coulomb = print_coulomb
    self.kernel_file = kernel_file
    self.qsymm = qsymm
def initialize(self):
    """Prepare the BSE calculation.

    Builds the linear frequency grid, selects valence/conduction bands,
    enumerates electron-hole pair orbitals S=(k,n,m), sets up q-points and
    MPI communicators, and computes the bare Coulomb kernel V_qGG.
    """
    self.printtxt('----------------------------------------')
    self.printtxt('Bethe-Salpeter Equation calculation')
    self.printtxt('----------------------------------------')
    self.printtxt('Started at: %s' % ctime())
    self.printtxt('')
    BASECHI.initialize(self)
    assert self.nspins == 1

    calc = self.calc
    self.kd = kd = calc.wfs.kd

    # frequency points init
    self.dw = self.w_w[1] - self.w_w[0]
    assert ((self.w_w[1:] - self.w_w[:-1] - self.dw) < 1e-10).all() # make sure its linear w grid
    assert self.w_w.max() == self.w_w[-1]

    self.dw /= Hartree
    self.w_w /= Hartree
    self.wmax = self.w_w[-1]
    self.Nw = int(self.wmax / self.dw) + 1

    # band init: default to the single band pair around the Fermi level
    if self.nc is None:
        nv = self.nvalence / 2 - 1
        self.nv = np.array([nv, nv+1])    # conduction band start / end
        self.nc = np.array([nv+1, nv+2])  # valence band start / end

    self.printtxt('')
    self.printtxt('Number of electrons : %d' % (self.nvalence))
    self.printtxt('Valence band included : (band %d to band %d)' %(self.nv[0], self.nv[1]-1))
    self.printtxt('Conduction band included : (band %d to band %d)' %(self.nc[0], self.nc[1]-1))
    if self.eshift is not None:
        self.printtxt('Scissors operator : %2.3f eV' % self.eshift)
    self.printtxt('')

    # find the pair index and initialized pair energy (e_i - e_j) and occupation(f_i-f_j)
    self.e_S = {}
    focc_s = {}
    self.Sindex_S3 = {}
    iS = 0
    kq_k = self.kq_k
    for k1 in range(self.kd.nbzkpts):
        ibzkpt1 = kd.bz2ibz_k[k1]
        ibzkpt2 = kd.bz2ibz_k[kq_k[k1]]
        for n1 in range(self.nv[0], self.nv[1]):
            for m1 in range(self.nc[0], self.nc[1]):
                focc = self.f_skn[0][ibzkpt1,n1] - self.f_skn[0][ibzkpt2,m1]
                if self.coupling: # Dont use Tamm-Dancoff Approx.
                    check_ftol = np.abs(focc) > self.ftol
                else:
                    check_ftol = focc > self.ftol
                if check_ftol:
                    # use GW quasiparticle energies instead of DFT ones if supplied
                    if self.gw_skn is None:
                        self.e_S[iS] = self.e_skn[0][ibzkpt2,m1] - self.e_skn[0][ibzkpt1,n1]
                    else:
                        self.e_S[iS] = self.gw_skn[0][ibzkpt2,m1] - self.gw_skn[0][ibzkpt1,n1]
                    focc_s[iS] = focc
                    self.Sindex_S3[iS] = (k1, n1, m1)
                    iS += 1
    self.nS = iS
    self.focc_S = np.zeros(self.nS)
    for iS in range(self.nS):
        self.focc_S[iS] = focc_s[iS]

    # q points init
    self.bzq_qc = kd.get_bz_q_points()
    if not self.qsymm:
        self.ibzq_qc = self.bzq_qc
    else:
        (self.ibzq_qc, self.ibzq_q, self.iop_q,
         self.timerev_q, self.diff_qc) = kd.get_ibz_q_points(self.bzq_qc,
                                                             calc.wfs.symmetry.op_scc)
        if np.abs(self.bzq_qc - kd.bzk_kc).sum() < 1e-8:
            assert np.abs(self.ibzq_qc - kd.ibzk_kc).sum() < 1e-8
    self.nibzq = len(self.ibzq_qc)

    # Parallel initialization
    # kcomm and wScomm is only to be used when wavefunctions are distributed in parallel.
    self.comm = self.Scomm = world
    self.kcomm = world
    self.wScomm = serial_comm
    self.nS, self.nS_local, self.nS_start, self.nS_end = parallel_partition(
        self.nS, world.rank, world.size, reshape=False)
    self.print_bse()

    if calc.input_parameters['mode'] == 'lcao':
        calc.initialize_positions()

    # Coulomb interaction at q=0 for Hartree coupling
    ### 2D z direction only !!!!!!!!!!!!!!!!!!!!!!
    if self.integrate_coulomb is None:
        self.integrate_coulomb = []
        if self.vcut is None:
            pass
        elif self.vcut == '2D':
            # integrate only the in-plane G=0 vectors
            for iG in range(len(self.Gvec_Gc)):
                if self.Gvec_Gc[iG, 0] == 0 and self.Gvec_Gc[iG, 1] == 0:
                    self.integrate_coulomb.append(iG)
        else:
            raise NotImplementedError
    elif type(self.integrate_coulomb) is int:
        self.integrate_coulomb = range(self.integrate_coulomb)
    elif self.integrate_coulomb == 'all':
        self.integrate_coulomb = range(len(self.Gvec_Gc))
    elif type(self.integrate_coulomb) is list:
        pass
    else:
        # NOTE(review): raising a plain string is illegal in Python >= 2.6
        # (TypeError: exceptions must derive from BaseException); this should
        # be `raise ValueError(...)`.
        raise 'Invalid option for integrate_coulomb'

    self.printtxt('')
    self.printtxt('Calculating bare Coulomb kernel')
    if not len(self.integrate_coulomb) == 0:
        self.printtxt('Integrating Coulomb kernel at %s reciprocal lattice vector(s)' % len(self.integrate_coulomb))

    # Coulomb interaction at problematic G's for exchange coupling
    if len(self.integrate_coulomb) != 0:
        self.vint_Gq = []
        for iG in self.integrate_coulomb:
            v_q, v0_q = calculate_Kc_q(self.acell_cv,
                                       self.bcell_cv,
                                       self.pbc,
                                       self.kd.N_c,
                                       vcut=self.vcut,
                                       Gvec_c=self.Gvec_Gc[iG],
                                       q_qc=self.ibzq_qc.copy())
            self.vint_Gq.append(v_q)

            if self.print_coulomb:
                self.printtxt('')
                self.printtxt('Average kernel relative to bare kernel - \int v(q)dq / v(q0): ')
                self.printtxt(' G: % s' % self.Gvec_Gc[iG])
                for iq in range(len(v_q)):
                    q_s = ' q = [%1.2f, %1.2f, %1.2f]: ' % (self.ibzq_qc[iq,0],
                                                            self.ibzq_qc[iq,1],
                                                            self.ibzq_qc[iq,2])
                    v_rel = v_q[iq] / v0_q[iq]
                    self.printtxt(q_s + '%1.3f' % v_rel)
                self.printtxt('')
    self.V_qGG = self.full_bare_interaction()

    return
def calculate(self):
    """Build the BSE/TDHF/RPA two-particle Hamiltonian H_sS.

    Assembles the kernel K_SS from pair density matrices and the (screened)
    Coulomb interaction, adds the pair-energy diagonal, symmetrizes it for
    the Tamm-Dancoff case, saves it to disk, and returns the local rows H_sS.
    """
    calc = self.calc
    f_skn = self.f_skn
    e_skn = self.e_skn
    kq_k = self.kq_k
    focc_S = self.focc_S
    e_S = self.e_S
    op_scc = calc.wfs.symmetry.op_scc

    # Get phi_qaGp (PAW corrections): reuse the cached file if consistent,
    # otherwise recompute and reopen it.
    if self.mode == 'RPA':
        self.phi_aGp = self.get_phi_aGp()
    else:
        try:
            self.reader = Reader('phi_qaGp')
            tmp = self.load_phi_aGp(self.reader, 0)[0]
            assert len(tmp) == self.npw
            self.printtxt('Finished reading phi_aGp')
        except:
            self.printtxt('Calculating phi_qaGp')
            self.get_phi_qaGp()
            world.barrier()
            self.reader = Reader('phi_qaGp')

    self.printtxt('Memory used %f M' % (maxrss() / 1024.**2))
    self.printtxt('')

    # diagonal bare kernel at the q-point of interest (q=0 head zeroed
    # in the optical limit)
    if self.optical_limit:
        iq = np.where(np.sum(abs(self.ibzq_qc), axis=1) < 1e-5)[0][0]
    else:
        iq = np.where(np.sum(abs(self.ibzq_qc - self.q_c), axis=1) < 1e-5)[0][0]
    kc_G = np.array([self.V_qGG[iq, iG, iG] for iG in range(self.npw)])
    if self.optical_limit:
        kc_G[0] = 0.

    # Get screened Coulomb kernel (BSE only; TDHF/RPA use the bare one)
    if self.mode == 'BSE':
        try:
            # Read
            data = pickle.load(open(self.kernel_file+'.pckl'))
            W_qGG = data['W_qGG']
            assert np.shape(W_qGG) == np.shape(self.V_qGG)
            self.printtxt('Finished reading screening interaction kernel')
        except:
            # Calculate from scratch
            self.printtxt('Calculating screening interaction kernel.')
            W_qGG = self.full_static_screened_interaction()
        self.printtxt('')
    else:
        W_qGG = self.V_qGG

    t0 = time()
    self.printtxt('Calculating %s matrix elements' % self.mode)

    # Calculate full kernel: local rows iS, all columns jS
    K_SS = np.zeros((self.nS_local, self.nS), dtype=complex)
    self.rhoG0_S = np.zeros(self.nS, dtype=complex)
    for iS in range(self.nS_start, self.nS_end):
        k1, n1, m1 = self.Sindex_S3[iS]
        rho1_G = self.density_matrix(n1,m1,k1)
        self.rhoG0_S[iS] = rho1_G[0]
        for jS in range(self.nS):
            k2, n2, m2 = self.Sindex_S3[jS]
            rho2_G = self.density_matrix(n2,m2,k2)
            # exchange (Hartree) term
            K_SS[iS-self.nS_start, jS] = np.sum(rho1_G.conj() * rho2_G * kc_G)

            if not self.mode == 'RPA':
                # direct (screened-Coulomb) term
                rho3_G = self.density_matrix(n1,n2,k1,k2)
                rho4_G = self.density_matrix(m1,m2,self.kq_k[k1],
                                             self.kq_k[k2])

                # fold q = k2 - k1 back into the first Brillouin zone
                q_c = self.kd.bzk_kc[k2] - self.kd.bzk_kc[k1]
                q_c[np.where(q_c > 0.501)] -= 1.
                q_c[np.where(q_c < -0.499)] += 1.

                iq = self.kd.where_is_q(q_c, self.bzq_qc)
                if not self.qsymm:
                    W_GG = W_qGG[iq]
                else:
                    # map W from the irreducible q-point via the symmetry
                    # operation (and possibly time reversal), remapping G
                    ibzq = self.ibzq_q[iq]
                    W_GG_tmp = W_qGG[ibzq]

                    iop = self.iop_q[iq]
                    timerev = self.timerev_q[iq]
                    diff_c = self.diff_qc[iq]
                    invop = np.linalg.inv(op_scc[iop])
                    Gindex = np.zeros(self.npw, dtype=int)
                    for iG in range(self.npw):
                        G_c = self.Gvec_Gc[iG]
                        if timerev:
                            RotG_c = -np.int8(np.dot(invop, G_c+diff_c).round())
                        else:
                            RotG_c = np.int8(np.dot(invop, G_c+diff_c).round())
                        tmp_G = np.abs(self.Gvec_Gc - RotG_c).sum(axis=1)
                        try:
                            Gindex[iG] = np.where(tmp_G < 1e-5)[0][0]
                        except:
                            # rotated G fell outside the sphere; mark missing
                            Gindex[iG] = -1

                    W_GG = np.zeros_like(W_GG_tmp)
                    for iG in range(self.npw):
                        for jG in range(self.npw):
                            if Gindex[iG] == -1 or Gindex[jG] == -1:
                                W_GG[iG, jG] = 0
                            else:
                                W_GG[iG, jG] = W_GG_tmp[Gindex[iG], Gindex[jG]]

                if self.mode == 'BSE':
                    tmp_GG = np.outer(rho3_G.conj(), rho4_G) * W_GG
                    K_SS[iS-self.nS_start, jS] -= 0.5 * np.sum(tmp_GG)
                else:
                    # TDHF keeps only the diagonal of W
                    tmp_G = rho3_G.conj() * rho4_G * np.diag(W_GG)
                    K_SS[iS-self.nS_start, jS] -= 0.5 * np.sum(tmp_G)
        self.timing(iS, t0, self.nS_local, 'pair orbital')
    K_SS /= self.vol

    world.sum(self.rhoG0_S)

    # Get and solve Hamiltonian: H = diag(e_S) + focc * K
    H_sS = np.zeros_like(K_SS)
    for iS in range(self.nS_start, self.nS_end):
        H_sS[iS-self.nS_start,iS] = e_S[iS]
        for jS in range(self.nS):
            H_sS[iS-self.nS_start,jS] += focc_S[iS] * K_SS[iS-self.nS_start,jS]

    # Force matrix to be Hermitian (Tamm-Dancoff only)
    if not self.coupling:
        if world.size > 1:
            H_Ss = self.redistribute_H(H_sS)
        else:
            H_Ss = H_sS
        H_sS = (np.real(H_sS) + np.real(H_Ss.T)) / 2. + 1j * (np.imag(H_sS) - np.imag(H_Ss.T)) /2.

    # Save H_sS matrix
    self.par_save('H_SS','H_SS', H_sS)

    return H_sS
def diagonalize(self, H_sS):
    """Diagonalize the two-particle Hamiltonian.

    With coupling (non-Hermitian H) numpy.linalg.eig is used on the gathered
    matrix; without coupling, lapack (serial) or scalapack (parallel) is
    used.  Results are stored in self.w_S / self.v_SS and saved to disk.
    """
    if self.coupling: # Non-Hermitian matrix can only use linalg.eig
        self.printtxt('Use numpy.linalg.eig')
        H_SS = np.zeros((self.nS, self.nS), dtype=complex)
        if self.nS % world.size == 0:
            world.all_gather(H_sS, H_SS)
        else:
            H_SS = gatherv(H_sS)
        self.w_S, self.v_SS = np.linalg.eig(H_SS)
        self.par_save('v_SS', 'v_SS', self.v_SS[self.nS_start:self.nS_end, :].copy())
    else:
        if world.size == 1:
            self.printtxt('Use lapack.')
            from gpaw.utilities.lapack import diagonalize
            self.w_S = np.zeros(self.nS)
            H_SS = H_sS
            diagonalize(H_SS, self.w_S)
            self.v_SS = H_SS.conj() # eigenvectors in the rows, transposed later
        else:
            self.printtxt('Use scalapack')
            self.w_S, self.v_sS = self.scalapack_diagonalize(H_sS)
            self.v_SS = self.v_sS # just use the same name
        self.par_save('v_SS', 'v_SS', self.v_SS)
    return
def par_save(self,filename, name, A_sS):
    """Write the distributed matrix *A_sS* (plus w_S/rhoG0_S) to *filename*.

    Rank 0 creates the file; every other rank sends its local rows to rank 0
    which writes them in rank order.
    """
    from gpaw.io import open
    nS_local = self.nS_local
    nS = self.nS

    if rank == 0:
        w = open(filename, 'w', world)
        w.dimension('nS', nS)

        if name == 'v_SS':
            # eigenvalues are only available once v_SS exists
            w.add('w_S', ('nS',), dtype=self.w_S.dtype)
            w.fill(self.w_S)
        w.add('rhoG0_S', ('nS',), dtype=complex)
        w.fill(self.rhoG0_S)
        w.add(name, ('nS', 'nS'), dtype=complex)

    tmp = np.zeros_like(A_sS)
    # Assumes that H_SS is written in order from rank 0 - rank N
    for irank in range(size):
        if irank == 0:
            if rank == 0:
                w.fill(A_sS)
        else:
            if rank == irank:
                world.send(A_sS, 0, irank+100)
            if rank == 0:
                world.receive(tmp, irank, irank+100)
                w.fill(tmp)
    if rank == 0:
        w.close()
    world.barrier()
def par_load(self,filename, name):
    """Read this rank's rows of matrix *name* from *filename*.

    Also restores w_S (when loading eigenvectors) and rhoG0_S.
    Returns the local slice A_SS of shape (nS_local, nS).
    """
    from gpaw.io import open
    r = open(filename, 'r')
    nS = r.dimension('nS')

    if name == 'v_SS':
        self.w_S = r.get('w_S')

    A_SS = np.zeros((self.nS_local, nS), dtype=complex)
    for iS in range(self.nS_start, self.nS_end):
        A_SS[iS-self.nS_start,:] = r.get(name, iS)

    self.rhoG0_S = r.get('rhoG0_S')
    r.close()

    return A_SS
def full_static_screened_interaction(self):
    """Calculate the static screened interaction W_GG(q) for every IBZ q.

    For each q-point an RPA dielectric matrix is computed with DF at w=0 and
    inverted; W = eps^-1 * V.  If kernel_file is set, rank 0 caches the
    result as a pickle.
    """
    W_qGG = np.zeros((self.nibzq, self.npw, self.npw), dtype=complex)

    t0 = time()
    for iq in range(self.nibzq):
        q = self.ibzq_qc[iq]
        optical_limit = False
        if np.abs(q).sum() < 1e-8:
            # replace q=0 by the small finite q used for the optical limit
            q = self.q_c.copy()
            optical_limit = True
        df = DF(calc=self.calc,
                q=q,
                w=(0.,),
                optical_limit=optical_limit,
                nbands=self.nbands,
                hilbert_trans=False,
                eta=0.0001,
                ecut=self.ecut*Hartree,
                xc='RPA',
                txt='df.out')
        df.initialize()
        df.calculate()

        if optical_limit:
            # patch the head and wings of the kernel with the q->0 limit
            K_GG = self.V_qGG[iq].copy()
            q_v = np.dot(q, self.bcell_cv)
            K0 = calculate_Kc(q,
                              self.Gvec_Gc,
                              self.acell_cv,
                              self.bcell_cv,
                              self.pbc,
                              vcut=self.vcut)[0,0]
            for iG in range(1,self.npw):
                K_GG[0, iG] = self.V_qGG[iq, iG, iG]**0.5 * K0**0.5
                K_GG[iG, 0] = self.V_qGG[iq, iG, iG]**0.5 * K0**0.5
            K_GG[0,0] = K0
            df_GG = np.eye(self.npw, self.npw) - K_GG*df.chi0_wGG[0]
        else:
            df_GG = np.eye(self.npw, self.npw) - self.V_qGG[iq]*df.chi0_wGG[0]
        dfinv_GG = np.linalg.inv(df_GG)
        if optical_limit:
            eps = 1/dfinv_GG[0,0]
            self.printtxt(' RPA macroscopic dielectric constant is: %3.3f' % eps.real)
        W_qGG[iq] = dfinv_GG * self.V_qGG[iq]
        self.timing(iq, t0, self.nibzq, 'iq')

    if rank == 0:
        if self.kernel_file is not None:
            data = {'W_qGG': W_qGG}
            name = self.kernel_file+'.pckl'
            # NOTE(review): the file handle is never closed and the file is
            # opened in text mode; `with open(name, 'wb')` would be safer.
            pickle.dump(data, open(name, 'w'), -1)
    return W_qGG
def full_bare_interaction(self):
    """Calculate the bare Coulomb kernel V_GG(q) for every IBZ q-point.

    Built as an outer product of sqrt(diagonal) values; entries listed in
    self.integrate_coulomb are replaced by the numerically integrated values
    computed in initialize().
    """
    V_qGG = np.zeros((self.nibzq, self.npw, self.npw), dtype=complex)

    t0 = time()  # NOTE(review): t0 is never used here
    for iq in range(self.nibzq):
        q = self.ibzq_qc[iq]
        Vq_G = np.diag(calculate_Kc(q,
                                    self.Gvec_Gc,
                                    self.acell_cv,
                                    self.bcell_cv,
                                    self.pbc,
                                    integrate_gamma=True,
                                    N_k=self.kd.N_c,
                                    vcut=self.vcut))**0.5
        for i, iG in enumerate(self.integrate_coulomb):
            Vq_G[iG] = self.vint_Gq[i][iq]**0.5
        V_qGG[iq] = np.outer(Vq_G, Vq_G)
    return V_qGG
def print_bse(self):
    """Log a short summary of problem size and the parallelization scheme."""
    log = self.printtxt
    if self.mode != 'RPA':
        log('Number of q points : %d' %(self.nibzq))
    log('Number of frequency points : %d' %(self.Nw) )
    log('Number of pair orbitals : %d' %(self.nS) )
    log('')
    log('Parallelization scheme:')
    log(' Total cpus : %d' %(world.size))
    log(' pair orb parsize : %d' %(self.Scomm.size))
    return
def get_phi_qaGp(self):
    """Compute phi_aGp for every BZ q-point and write them to 'phi_qaGp'.

    q-points are distributed over ranks; each rank fills a zero-padded array
    of shape (nq_local, natoms, N1_max, N2_max), then all data is funneled
    to rank 0 which writes one record per q-point in global q order.
    """
    # find the maximal array dimensions over all atoms (for zero padding)
    N1_max = 0
    N2_max = 0
    natoms = len(self.calc.wfs.setups)
    for id in range(natoms):
        N1 = self.npw
        N2 = self.calc.wfs.setups[id].ni**2
        if N1 > N1_max:
            N1_max = N1
        if N2 > N2_max:
            N2_max = N2

    nbzq = self.kd.nbzkpts
    nbzq, nq_local, q_start, q_end = parallel_partition(
        nbzq, world.rank, world.size, reshape=False)
    phimax_qaGp = np.zeros((nq_local, natoms, N1_max, N2_max), dtype=complex)

    t0 = time()
    for iq in range(nq_local):
        q_c = self.bzq_qc[iq + q_start]
        tmp_aGp = self.get_phi_aGp(q_c, parallel=False)
        for id in range(natoms):
            N1, N2 = tmp_aGp[id].shape
            phimax_qaGp[iq, id, :N1, :N2] = tmp_aGp[id]
        self.timing(iq*world.size, t0, nq_local, 'iq')
    world.barrier()

    # Write to disk
    filename = 'phi_qaGp'
    if world.rank == 0:
        w = Writer(filename)
        w.dimension('nbzq', nbzq)
        w.dimension('natoms', natoms)
        w.dimension('nG', N1_max)
        w.dimension('nii', N2_max)
        w.add('phi_qaGp', ('nbzq', 'natoms', 'nG', 'nii',), dtype=complex)

    for q in range(nbzq):
        # reproduce the block partitioning to find which rank owns q
        residual = nbzq % size
        N_local = nbzq // size
        if q < residual * (N_local + 1):
            qrank = q // (N_local + 1)
        else:
            qrank = (q - residual * (N_local + 1)) // N_local + residual

        if qrank == 0:
            if world.rank == 0:
                phi_aGp = phimax_qaGp[q - q_start]
        else:
            if world.rank == qrank:
                phi_aGp = phimax_qaGp[q - q_start]
                world.send(phi_aGp, 0, q)
            elif world.rank == 0:
                world.receive(phi_aGp, qrank, q)
        if world.rank == 0:
            w.fill(phi_aGp)
    world.barrier()
    if world.rank == 0:
        w.close()
    return
def load_phi_aGp(self, reader, iq):
    """Read phi_aGp for q-point *iq* from *reader*, trimming the zero padding.

    Returns a dict mapping atom index -> array of shape (npw, ni**2).
    """
    padded_aGp = np.array(reader.get('phi_qaGp', iq), complex)
    result = {}
    for a, padded_Gp in enumerate(padded_aGp):
        nii = self.calc.wfs.setups[a].ni**2
        result[a] = padded_Gp[:self.npw, :nii]
    return result
def get_dielectric_function(self, filename='df.dat', readfile=None):
    """Compute the macroscopic dielectric function epsilon(w) and write it.

    readfile=None computes everything from scratch; 'H_SS' or 'v_SS'
    restarts from the saved Hamiltonian or eigenvectors.  The spectrum is
    written to *filename* by rank 0 and an f-sum-rule check is printed.
    Returns epsilon_w (complex array of length Nw).
    """
    if self.epsilon_w is None:
        self.initialize()

        if readfile is None:
            H_sS = self.calculate()
            self.printtxt('Diagonalizing %s matrix.' % self.mode)
            self.diagonalize(H_sS)
            self.printtxt('Calculating dielectric function.')
        elif readfile == 'H_SS':
            H_sS = self.par_load('H_SS', 'H_SS')
            self.printtxt('Finished reading H_SS.gpw')
            self.diagonalize(H_sS)
            self.printtxt('Finished diagonalizing BSE matrix')
        elif readfile == 'v_SS':
            self.v_SS = self.par_load('v_SS', 'v_SS')
            self.printtxt('Finished reading v_SS.gpw')
        else:
            # BUGFIX: the original had the bare name `XX` here, which crashed
            # with an uninformative NameError for any unexpected option.
            raise ValueError('Unknown readfile option: %s' % readfile)

        w_S = self.w_S
        if not self.coupling:
            v_SS = self.v_SS.T # v_SS[:,lamda]
        else:
            v_SS = self.v_SS
        rhoG0_S = self.rhoG0_S
        focc_S = self.focc_S

        # get overlap matrix (needed for the non-orthogonal coupling case)
        if self.coupling:
            tmp = np.dot(v_SS.conj().T, v_SS )
            overlap_SS = np.linalg.inv(tmp)

        # get chi: oscillator strengths C_S, then sum of Lorentzians
        epsilon_w = np.zeros(self.Nw, dtype=complex)
        t0 = time()

        A_S = np.dot(rhoG0_S, v_SS)
        B_S = np.dot(rhoG0_S*focc_S, v_SS)
        if self.coupling:
            C_S = np.dot(B_S.conj(), overlap_SS.T) * A_S
        else:
            if world.size == 1:
                C_S = B_S.conj() * A_S
            else:
                tmp = B_S.conj() * A_S
                C_S = gatherv(tmp, self.nS)

        for iw in range(self.Nw):
            tmp_S = 1. / (iw*self.dw - w_S + 1j*self.eta)
            epsilon_w[iw] += np.dot(tmp_S, C_S)

        epsilon_w *= - 4 * pi / np.inner(self.qq_v, self.qq_v) / self.vol
        epsilon_w += 1

        self.epsilon_w = epsilon_w

    # BUGFIX: read the (possibly cached) spectrum back into the local name so
    # repeated calls do not hit an undefined local `epsilon_w`.
    epsilon_w = self.epsilon_w

    if rank == 0:
        f = open(filename,'w')
        for iw in range(self.Nw):
            energy = iw * self.dw * Hartree
            print >> f, energy, np.real(epsilon_w[iw]), np.imag(epsilon_w[iw])
        f.close()
    # Wait for I/O to finish
    world.barrier()

    # Check f-sum rule
    N1 = 0
    for iw in range(self.Nw):
        w = iw * self.dw
        N1 += np.imag(epsilon_w[iw]) * w
    N1 *= self.dw * self.vol / (2 * pi**2)

    self.printtxt('')
    self.printtxt('Sum rule:')
    nv = self.nvalence
    self.printtxt('N1 = %f, %f %% error' %(N1, (N1 - nv) / nv * 100) )

    return epsilon_w
def get_e_h_density(self, lamda=None, filename=None):
    """Write electron and hole densities of exciton *lamda* as cube files.

    The densities are accumulated from pair-orbital products weighted by the
    eigenvector coefficients A_S, summed over ranks, and written by rank 0
    to 'rho_e.cube' / 'rho_h.cube'.
    """
    if filename is not None:
        self.load(filename)
        self.initialize()

    gd = self.gd
    w_S = self.w_S
    v_SS = self.v_SS
    A_S = v_SS[:, lamda]
    kq_k = self.kq_k
    kd = self.kd

    # Electron density: sum over pairs sharing the same hole (m1==m2, k1==k2)
    nte_R = gd.zeros()
    for iS in range(self.nS_start, self.nS_end):
        print 'electron density:', iS
        k1, n1, m1 = self.Sindex_S3[iS]
        ibzkpt1 = kd.bz2ibz_k[k1]
        psitold_g = self.get_wavefunction(ibzkpt1, n1)
        psit1_g = kd.transform_wave_function(psitold_g, k1)
        for jS in range(self.nS):
            k2, n2, m2 = self.Sindex_S3[jS]
            if m1 == m2 and k1 == k2:
                psitold_g = self.get_wavefunction(ibzkpt1, n2)
                psit2_g = kd.transform_wave_function(psitold_g, k1)
                nte_R += A_S[iS] * A_S[jS].conj() * psit1_g.conj() * psit2_g

    # Hole density: sum over pairs sharing the same electron (n1==n2, k1==k2)
    nth_R = gd.zeros()
    for iS in range(self.nS_start, self.nS_end):
        print 'hole density:', iS
        k1, n1, m1 = self.Sindex_S3[iS]
        ibzkpt1 = kd.bz2ibz_k[kq_k[k1]]
        psitold_g = self.get_wavefunction(ibzkpt1, m1)
        psit1_g = kd.transform_wave_function(psitold_g, kq_k[k1])
        for jS in range(self.nS):
            k2, n2, m2 = self.Sindex_S3[jS]
            if n1 == n2 and k1 == k2:
                psitold_g = self.get_wavefunction(ibzkpt1, m2)
                psit2_g = kd.transform_wave_function(psitold_g, kq_k[k1])
                nth_R += A_S[iS] * A_S[jS].conj() * psit1_g * psit2_g.conj()

    self.Scomm.sum(nte_R)
    self.Scomm.sum(nth_R)

    if rank == 0:
        write('rho_e.cube',self.calc.atoms, format='cube', data=nte_R)
        write('rho_h.cube',self.calc.atoms, format='cube', data=nth_R)
    world.barrier()

    return
def get_excitation_wavefunction(self, lamda=None,filename=None, re_c=None, rh_c=None):
    """Plot the exciton wavefunction with one particle's position fixed.

    Pass re_c (fixed electron grid point) to plot the hole wavefunction, or
    rh_c (fixed hole grid point) to plot the electron wavefunction; cube
    files are written for the unit cell and for an nR-times repeated cell.
    (Original author's note: "garbage at the moment, come back later".)
    """
    if filename is not None:
        self.load(filename)
        self.initialize()

    gd = self.gd
    w_S = self.w_S
    v_SS = self.v_SS
    A_S = v_SS[:, lamda]
    kq_k = self.kq_k
    kd = self.kd

    nx, ny, nz = self.gd.N_c
    nR = 9  # number of repeated cells per direction in the big-cell plot
    nR2 = (nR - 1 ) // 2
    if re_c is not None:
        psith_R = gd.zeros(dtype=complex)
        psith2_R = np.zeros((nR*nx, nR*ny, nz), dtype=complex)
    elif rh_c is not None:
        psite_R = gd.zeros(dtype=complex)
        psite2_R = np.zeros((nR*nx, ny, nR*nz), dtype=complex)
    else:
        self.printtxt('No wavefunction output !')
        return

    for iS in range(self.nS_start, self.nS_end):
        k, n, m = self.Sindex_S3[iS]
        ibzkpt1 = kd.bz2ibz_k[k]
        ibzkpt2 = kd.bz2ibz_k[kq_k[k]]
        print 'hole wavefunction', iS, (k,n,m),A_S[iS]

        psitold_g = self.get_wavefunction(ibzkpt1, n)
        psit1_g = kd.transform_wave_function(psitold_g, k)

        psitold_g = self.get_wavefunction(ibzkpt2, m)
        psit2_g = kd.transform_wave_function(psitold_g, kq_k[k])

        if re_c is not None:
            # given electron position, plot hole wavefunction
            tmp = A_S[iS] * psit1_g[re_c].conj() * psit2_g
            psith_R += tmp

            # replicate with the Bloch phase exp(i k.R) per repeated cell
            k_c = self.kd.bzk_kc[k] + self.q_c
            for i in range(nR):
                for j in range(nR):
                    R_c = np.array([i-nR2, j-nR2, 0])
                    psith2_R[i*nx:(i+1)*nx, j*ny:(j+1)*ny, 0:nz] += \
                        tmp * np.exp(1j*2*pi*np.dot(k_c,R_c))
        elif rh_c is not None:
            # given hole position, plot electron wavefunction
            tmp = A_S[iS] * psit1_g.conj() * psit2_g[rh_c] * self.expqr_g
            psite_R += tmp

            k_c = self.kd.bzk_kc[k]
            k_v = np.dot(k_c, self.bcell_cv)
            for i in range(nR):
                for j in range(nR):
                    R_c = np.array([i-nR2, 0, j-nR2])
                    R_v = np.dot(R_c, self.acell_cv)
                    assert np.abs(np.dot(k_v, R_v) - np.dot(k_c, R_c) * 2*pi).sum() < 1e-5
                    psite2_R[i*nx:(i+1)*nx, 0:ny, j*nz:(j+1)*nz] += \
                        tmp * np.exp(-1j*np.dot(k_v,R_v))
        else:
            pass

    if re_c is not None:
        self.Scomm.sum(psith_R)
        self.Scomm.sum(psith2_R)
        if rank == 0:
            write('psit_h.cube',self.calc.atoms, format='cube', data=psith_R)

            # enlarge the cell so the repeated-cell data lines up in the cube
            atoms = self.calc.atoms
            shift = atoms.cell[0:2].copy()
            positions = atoms.positions
            atoms.cell[0:2] *= nR2
            atoms.positions += shift * (nR2 - 1)
            write('psit_bigcell_h.cube',atoms, format='cube', data=psith2_R)
    elif rh_c is not None:
        self.Scomm.sum(psite_R)
        self.Scomm.sum(psite2_R)
        if rank == 0:
            write('psit_e.cube',self.calc.atoms, format='cube', data=psite_R)

            atoms = self.calc.atoms
            positions = atoms.positions
            atoms.cell[0:2] *= nR2
            write('psit_bigcell_e.cube',atoms, format='cube', data=psite2_R)
    else:
        pass
    world.barrier()

    return
def load(self, filename):
    """Restore eigenvalues w_S and eigenvectors v_SS from a pickle file."""
    # Use a context manager so the handle is closed (the original leaked it).
    with open(filename) as fd:
        data = pickle.load(fd)
    self.w_S = data['w_S']
    self.v_SS = data['v_SS']
    self.printtxt('Read succesfully !')
def save(self, filename):
    """Dump essential data (w_S and v_SS) to *filename* from rank 0 only."""
    data = {'w_S' : self.w_S,
            'v_SS' : self.v_SS}
    if rank == 0:
        # Context manager closes the handle (the original leaked it).
        with open(filename, 'w') as fd:
            pickle.dump(data, fd, -1)
    world.barrier()
def redistribute_H(self, H_sS):
    """Redistribute H from row-blocked (nS_local x nS) to column-blocked
    (nS x nS_local) layout via BLACS; used to Hermitize the matrix."""
    g1 = BlacsGrid(world, size, 1)
    g2 = BlacsGrid(world, 1, size)
    N = self.nS
    nndesc1 = g1.new_descriptor(N, N, self.nS_local, N)
    nndesc2 = g2.new_descriptor(N, N, N, self.nS_local)
    H_Ss = nndesc2.empty(dtype=H_sS.dtype)
    redistributor = Redistributor(world, nndesc1, nndesc2)
    redistributor.redistribute(H_sS, H_Ss)
    return H_Ss
def scalapack_diagonalize(self, H_sS):
    """Diagonalize the row-distributed Hermitian matrix H_sS with scalapack.

    Returns (w_S, v_sS): eigenvalues and the conjugated, row-redistributed
    eigenvectors.
    """
    mb = 32  # scalapack block size
    N = self.nS
    g1 = BlacsGrid(world, size, 1)
    g2 = BlacsGrid(world, size//2, 2)
    nndesc1 = g1.new_descriptor(N, N, self.nS_local, N)
    nndesc2 = g2.new_descriptor(N, N, mb, mb)
    A_ss = nndesc2.empty(dtype=H_sS.dtype)
    redistributor = Redistributor(world, nndesc1, nndesc2)
    redistributor.redistribute(H_sS, A_ss)
    # diagonalize on the 2D grid
    v_ss = nndesc2.zeros(dtype=A_ss.dtype)
    w_S = np.zeros(N,dtype=float)
    nndesc2.diagonalize_dc(A_ss, v_ss, w_S, 'L')
    # distribute the eigenvectors back to the row-blocked layout
    v_sS = np.zeros_like(H_sS)
    redistributor = Redistributor(world, nndesc2, nndesc1)
    redistributor.redistribute(v_ss, v_sS)
    return w_S, v_sS.conj()
def get_chi(self, w):
    """Calculate the BSE response function chi_wGG on frequencies *w*.

    NOTE(review): this method appears unfinished — chi_wGG is allocated but
    never filled (the oscillator strengths C_S are computed and then unused),
    so an all-zero array is returned.
    """
    H_SS = self.calculate()
    self.printtxt('Diagonalizing BSE matrix.')
    self.diagonalize(H_SS)
    self.printtxt('Calculating BSE response function.')

    w_S = self.w_S
    if not self.coupling:
        v_SS = self.v_SS.T # v_SS[:,lamda]
    else:
        v_SS = self.v_SS
    rhoG0_S = self.rhoG0_S
    focc_S = self.focc_S

    # get overlap matrix (coupling case: eigenvectors are not orthonormal)
    if self.coupling:
        tmp = np.dot(v_SS.conj().T, v_SS )
        overlap_SS = np.linalg.inv(tmp)
    # get chi
    chi_wGG = np.zeros((len(w), self.npw, self.npw), dtype=complex)
    t0 = time()

    A_S = np.dot(rhoG0_S, v_SS)
    B_S = np.dot(rhoG0_S*focc_S, v_SS)
    if self.coupling:
        C_S = np.dot(B_S.conj(), overlap_SS.T) * A_S
    else:
        C_S = B_S.conj() * A_S

    return chi_wGG
|
robwarm/gpaw-symm
|
gpaw/response/bse.py
|
Python
|
gpl-3.0
| 34,879
|
[
"ASE",
"GPAW"
] |
62e3fdee0a818ac9c9b2d0287e02f6d4ca43fd1e9165d3fb1c0fdd4fa73223b3
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
'''
Functions used in BF_python.py
Read the damn comments
(I'm sorry there aren't more objects)
'''
def logify_spec(isAPOGEE=False, w00=5400, n=38750, stepV=1.7, m=171):
    '''
    Build a log-wavelength array w1 with equal spacing in velocity.

    w00   : starting wavelength of the log-wave array in Angstroms
    n     : desired length of the log-wave vector in pixels (must be EVEN)
    stepV : step in velocities (km/s) in the wavelength vector w1
    m     : length of BF (must be ODD)

    Good APOGEE values: w00=15145, n=20000.
    Good ARCES values:  w00=5400, n=38750.
    Returns (w1, m, r) where r is the fractional step per pixel.
    '''
    r = stepV / 299792.458  # stepV converted to a fraction of c per pixel
    pixel_index = np.arange(float(n))
    w1 = w00 * (1.0 + r) ** pixel_index
    print('The new log-wavelength scale will span %d - %d A with stepsize %f km/s.' % (w1[0], w1[-1], stepV))
    print(' ')
    return w1, m, r
def read_one_specfile(infile = 'myspectrum.txt', isAPOGEE = False):
    '''
    Read in a single FITS or txt spectrum file
    (Bare-bones version of read_specfiles, below)
    Requires infile, isAPOGEE
    Returns wave, spec (empty lists if the extension is not recognized)
    '''
    if infile[-3:] == 'txt':
        try:
            wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
            print('Text file {0}, isAPOGEE = {1}'.format(infile[-15:], isAPOGEE))
        except:
            raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
        if isAPOGEE == True: # we need to sort by wavelength
            spec = spec[np.argsort(wave)]
            wave = wave[np.argsort(wave)]
    elif infile[-4:] == 'fits' or infile[-4:] == 'FITS':
        # assume it's a FITS file
        # Read in the FITS file with all the data in the primary HDU
        try:
            hdu = fits.open(infile)
        except:
            print('{0} not found or cannot be opened'.format(infile))
        else:
            head = hdu[0].header
            try: datetime = head['date-obs']
            except: datetime = head['date']
            print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
            if isAPOGEE == True: # APOGEE: the data is in a funny place and backwards
                spec = hdu[1].data
                spec = spec.flatten()
                spec = spec[::-1]
            else: # non-APOGEE (regular) option
                spec = hdu[0].data
            # Define the original wavelength scale
            if isAPOGEE == True: # APOGEE: read wavelength values straight from FITS file
                wave = hdu[4].data
                wave = wave.flatten()
                wave = wave[::-1]
            else: # non-APOGEE (linear): create wavelength values from header data
                headerdwave = head['cdelt1']
                headerwavestart = head['crval1']
                headerwavestop = headerwavestart + headerdwave*len(spec)
                wave = np.arange(headerwavestart, headerwavestop, headerdwave)
            if len(wave) != len(spec): # The wave array is sometimes 1 longer than it should be?
                minlength = min(len(wave), len(spec))
                wave = wave[0:minlength]
                spec = spec[0:minlength]
            try: # check to see if we have a file with log angstroms
                logcheck = head['dispunit']
            except:
                logcheck = 'linear' # assume linear if no 'dispunit' is in header
            if logcheck == 'log angstroms':
                wave = np.power(10,wave) # make it linear
    else:
        print('File does not end in \'txt\' or \'fits\', no spectrum loaded.')
        wave = []; spec = []
    return wave, spec
def read_specfiles(infiles = 'infiles_BF.txt', bjdinfile = 'bjds_baryvels.txt', isAPOGEE = False):
    '''
    Read in some FITS or TXT files that are spectra and may or may not be APOGEE
    Requires infiles, bjdinfile, isAPOGEE
    Returns nspec, filenamelist, datetimelist, wavelist, speclist
    '''
    f1 = open(infiles)
    print('Reading the files listed in %s' % infiles)
    print(' ')
    speclist = []; wavelist = []
    filenamelist = []; datetimelist = []
    if isAPOGEE == False:
        checkAPOGEE = True # not all infiles are APOGEE, but let's check in case *some* are
    else:
        checkAPOGEE = False # all the infiles are APOGEE so we don't have to search
    i = 0
    for line in f1: # This loop happens once for each spectrum
        infile = line.rstrip()
        if checkAPOGEE == True: # check to see if a subset of infiles are from APOGEE or not
            if 'apogee' in infile or 'APOGEE' in infile: isAPOGEE = True
            else: isAPOGEE = False
        if infile[-4:] == 'fits' or infile[-4:] == 'FITS':
            # assume it's a FITS file
            try:
                hdu = fits.open(infile)
                head = hdu[0].header
                filenamelist.append(infile)
                try: datetime = head['date-obs']
                except: datetime = head['date']
                datetimelist.append(Time(datetime, scale='utc', format='isot'))
                print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
            except:
                raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
            # it's time to dig out the spectral (flux) data and the wavelength scale!
            if isAPOGEE == True: # APOGEE: the data is in a funny place and backwards
                wave, spec = ProcessAPOGEEFITS(hdu)
            else: # not APOGEE
                spec = hdu[0].data # hope the info we want is in the zeroth HDU
                try:
                    headerdwave = head['cdelt1']
                    headerwavestart = head['crval1']
                    headerwavestop = headerwavestart + headerdwave*len(spec)
                    wave = np.arange(headerwavestart, headerwavestop, headerdwave)
                except:
                    raise RuntimeError('Cannot find wavelength info in FITS header')
                if len(wave) != len(spec): # the wave array is sometimes 1 longer than it should be?
                    minlength = min(len(wave), len(spec))
                    wave = wave[0:minlength]
                    spec = spec[0:minlength]
                try: # check to see if we have a file with log angstroms
                    logcheck = head['dispunit']
                except:
                    logcheck = 'linear' # hopefully, at least
                if logcheck == 'log angstroms':
                    wave = np.power(10, wave) # make it linear
        else: # treat it like a text file; observation date comes from bjdinfile
            filenamelist.append(infile)
            datetime = np.loadtxt(bjdinfile, comments='#', usecols=(1,), unpack=True)[i]
            datetimelist.append(Time(datetime, scale='utc', format='jd'))
            try:
                wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
                print('Text file {0}, isAPOGEE = {1}, bjdinfile date {2}'.format(infile[-17:], isAPOGEE, datetime))
            except:
                raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
            if isAPOGEE == True: # we need sort by wavelength, just in case it hasn't been
                spec = spec[np.argsort(wave)]
                wave = wave[np.argsort(wave)]
        # at the end of this mess, we have one file's WAVE and corresponding SPEC - save it!
        wavelist.append(wave)
        speclist.append(spec)
        i = i + 1
    # save the total number of spectra
    nspec = i
    f1.close()
    return nspec, filenamelist, datetimelist, wavelist, speclist
def ProcessAPOGEEFITS(hdu):
    '''
    Turns an APOGEE FITS hdu into a pair of wavelength and spectrum ndarrays

    Flux lives in extension 1 and wavelengths in extension 4; both are
    flattened and reversed, and the flux is divided by its median.
    '''
    flux = hdu[1].data.flatten()[::-1]
    flux = flux / np.median(flux)  # WARNING really basic, possibly bad normalization
    wavelength = hdu[4].data.flatten()[::-1]
    return wavelength, flux
def gaussparty(gausspars, nspec, filenamelist, bfsmoothlist, bf_ind, amplimits, threshold, widlimits):
    '''
    Fit 2 or 3 Gaussians to each smoothed broadening function.

    gausspars:     path to a text file with one line of initial guesses per
                   spectrum (amp, rv, width per peak: 6 values = 2 peaks,
                   9 values = 3 peaks); '#' starts a comment
    nspec:         total number of spectra (index 0 is a placeholder/template
                   and is skipped)
    filenamelist:  spectrum filenames, used only in the printed report
    bfsmoothlist:  smoothed broadening functions, one per spectrum
    bf_ind:        velocity axis the broadening functions are sampled on
    amplimits:     [min1, max1, min2, max2] amplitude bounds for peaks 1 and 2
    threshold:     allowed +/- excursion (velocity) of each peak from its guess
    widlimits:     [min1, max1, min2, max2] width bounds for peaks 1 and 2

    Returns bffitlist: element 0 is a placeholder 0; element i is the
    (possibly sanitized) multigaussfit result for spectrum i.
    '''
    # Read initial-guess lines, skipping full-line comments.
    param = []
    with open(gausspars) as f1:
        for line in f1:
            if line[0] != '#':
                param.append( line.rstrip() )
    #param = np.loadtxt(gausspars, comments='#')
    bffitlist = []
    bffitlist.append(0)
    # NOTE(review): gauss1/gauss2/gauss3 are initialized but never used below.
    gauss1 = [[] for i in range(nspec)]
    gauss2 = [[] for i in range(nspec)]
    gauss3 = [[] for i in range(nspec)]
    gauss1[0] = [0,0]
    gauss2[0] = [0,0]
    gauss3[0] = [0,0]
    error_array = np.ones(len(bfsmoothlist[0]))*0.01 # dummy array with 0.01 error values
    print(' ')
    print('Gaussian fit results: peak amplitude, width, rvraw, rvraw_err')
    print ('-------------------------------------------------------------')
    for i in range(1, nspec):
        # check to see if we are fitting a third gaussian, i.e., one near zero
        # don't print out the result of this fit, but do return it for plotting
        # handle comments in gausspars file without exploding
        if '#' in param[i]:
            commentbegin = param[i].find('#')
            partest = param[i][0:commentbegin].split()
        else:
            partest = param[i].split()
        # 6 entries = two peaks, 9 entries = three peaks.
        # NOTE(review): if neither matches, ngauss keeps its value from the
        # previous loop iteration (or is unbound on the first one) -- confirm.
        if len(partest) == 6: ngauss = 2
        elif len(partest) == 9: ngauss = 3
        else: print('something is wrong with your gausspars file!')
        # min and max pars for peak 1: amp, rv, width
        #minpars=[0.8, float(partest[1])-threshold, 0]
        #maxpars=[1.0, float(partest[1])+threshold, 7]
        # min and max pars for peak 2: amp, rv, width
        #minpars.extend([0.05, float(partest[4])-threshold, 0])
        #maxpars.extend([0.20, float(partest[4])+threshold, 40])
        minpars = [amplimits[0], float(partest[1])-threshold, widlimits[0]]
        maxpars = [amplimits[1], float(partest[1])+threshold, widlimits[1]]
        minpars.extend([amplimits[2], float(partest[4])-threshold, widlimits[2]])
        maxpars.extend([amplimits[3], float(partest[4])+threshold, widlimits[3]])
        if ngauss == 2:
            bffit = gf.multigaussfit(bf_ind, bfsmoothlist[i], ngauss=ngauss,
                    params=partest, err=error_array,
                    limitedmin=[True,True,True], limitedmax=[True,True,True],
                    minpars=minpars, maxpars=maxpars, quiet=True, shh=True)
        elif ngauss == 3:
            # min and max pars for peak 3: amp, rv, width (hardwired)
            minpars.extend([0.05, float(partest[7])-threshold, 0])
            maxpars.extend([1.0, float(partest[7])+threshold, 30])
            bffit = gf.multigaussfit(bf_ind, bfsmoothlist[i], ngauss=ngauss,
                    params=partest, err=error_array,
                    limitedmin=[True,True,True], limitedmax=[True,True,True],
                    minpars=minpars, maxpars=maxpars, quiet=True, shh=True)
        newbffit = [[] for x in range(len(bffit))]
        # Sometimes bffit[2] is None, or contains None. Set it to zeros instead.
        try:
            if not any(bffit[2]): # this will fail if bffit[2] = None
                newbffit[0] = bffit[0]
                newbffit[1] = bffit[1]
                newbffit[2] = [0, 0, 0, 0, 0]
            else:
                newbffit = bffit
        except:
            print('WARNING - gaussfit is acting up, fit failed for the next row, adjust gausspars file:')
            if not bffit[2]: # this catches the case where bffit[2] = None
                newbffit[0] = bffit[0]
                newbffit[1] = bffit[1]
                newbffit[2] = [0, 0, 0, 0, 0]
            else:
                newbffit = bffit
        bffitlist.append(newbffit)
        # NOTE: to get the gaussian fit corresponding to bfsmoothlist[i], use bffitlist[i][1].
        # RV1 for observation i is bffitlist[i][0][1] +/- bffitlist[i][2][1].
        # RV2 for observation i is bffitlist[i][0][4] +/- bffitlist[i][2][4].
        # (note: need to check if bffit[2] == None before calling bffit[2][1] or bffit[2][4])
        print('{0:s} {1:.3f} {2:.2f} {3:.4f} {4:.4f} \t {5:.3f} {6:.2f} {7:.4f} {8:.4f}'.format(
            filenamelist[i][-20:], newbffit[0][0], newbffit[0][2], newbffit[0][1], newbffit[2][1],
            newbffit[0][3], newbffit[0][5], newbffit[0][4], newbffit[2][4]))
    print(' ')
    print('You MUST manually guesstimate the location of each Gaussian\'s peak in %s!' % gausspars)
    print('Until you do, the above values will be WRONG and the plot will look TERRIBLE.')
    print(' ')
    return bffitlist
def rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvraw1, rvraw1_err, rvraw2, rvraw2_err, rvstd, bcvstd):
    '''
    Turn raw RVs into barycentric-corrected RVs and orbital phases.

    bjdinfile holds one row per spectrum with BJD in column 1 and the
    barycentric velocity correction (bcv) in column 2 ('#' lines skipped).
    Index 0 of every per-spectrum list is a placeholder 0, matching the
    convention used elsewhere in this module.

    Returns (phase, bjdfunny, rv1, rv2, rv1_err, rv2_err) where bjdfunny is
    the full BJD column minus bjdoffset.
    '''
    print('Calculating RVs...')
    with open(bjdinfile) as infile:
        bjdmid, bcv = np.loadtxt(infile, comments='#', usecols=(1, 2), unpack=True)
    bjdfunny = bjdmid - bjdoffset
    phase = [0]
    rv1 = [0]
    rv2 = [0]
    rv1_err = [0]
    rv2_err = [0]
    for idx in range(1, nspec):
        fracP = (bjdmid[idx] - BJD0) / period
        # NOTE(review): for fracP < 0 this yields a phase in [1, 2), because
        # Python's % already returns a non-negative remainder -- confirm intent.
        if fracP < 0:
            phase.append(1 + (fracP % 1))
        else:
            phase.append(fracP % 1)
        # Apply barycentric + standard-star corrections. DON'T MESS UP THE +/- SIGNS
        rv1.append(rvraw1[idx] + bcv[idx] - rvstd - bcvstd)
        rv2.append(rvraw2[idx] + bcv[idx] - rvstd - bcvstd)
        rv1_err.append(rvraw1_err[idx])
        rv2_err.append(rvraw2_err[idx])
    print(' ')
    return phase, bjdfunny, rv1, rv2, rv1_err, rv2_err
|
mrawls/BF-simulator
|
BF_functions.py
|
Python
|
mit
| 14,918
|
[
"Gaussian"
] |
ef42c8428e780e388207f76d44c9445a5ed6dc64b692d46769f63e6ee94bdc01
|
# -*- coding: utf-8 -*-
# Django settings for seahub project.
import sys
import os
import re
import random
import string
from seaserv import FILE_SERVER_ROOT, FILE_SERVER_PORT
# Repository root: this file lives one level below it.
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), os.pardir)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Default DB is a local sqlite3 file; server releases override NAME below.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '%s/seahub/seahub.db' % PROJECT_ROOT, # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '%s/media/' % PROJECT_ROOT
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '%s/assets/' % MEDIA_ROOT
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    '%s/static' % PROJECT_ROOT,
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# StaticI18N config
STATICI18N_ROOT = '%s/static/scripts' % PROJECT_ROOT
STATICI18N_OUTPUT_DIR = 'i18n'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this is a checked-in placeholder key; real deployments should
# override SECRET_KEY in seahub_settings.py (loaded at the bottom of this file).
SECRET_KEY = 'n*v0=jz-1rz@(4gx^tf%6^e7c&um@2)g-l=3_)t@19a69n1nv6'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# Order is important
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'seahub.auth.middleware.AuthenticationMiddleware',
    'seahub.base.middleware.BaseMiddleware',
    'seahub.base.middleware.InfobarMiddleware',
    'seahub.password_session.middleware.CheckPasswordHash'
)
SITE_ROOT_URLCONF = 'seahub.urls'
ROOT_URLCONF = 'djblets.util.rooturl'
SITE_ROOT = '/'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'seahub.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_ROOT, '../../seahub-data/custom/templates'),
    os.path.join(PROJECT_ROOT, 'seahub/templates'),
)
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
LANGUAGES = (
    ('ca', gettext_noop('català')),
    ('de', gettext_noop(u'Deutsch')),
    ('en', gettext_noop('English')),
    ('es', gettext_noop('Español')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('es-mx', gettext_noop('Mexican Spanish')),
    ('fr', gettext_noop('français')),
    ('he', gettext_noop('עברית')),
    ('hu', gettext_noop('Magyar')),
    ('is', gettext_noop('Íslenska')),
    ('it', gettext_noop('Italiano')),
    ('ja', gettext_noop('日本語')),
    ('ko', gettext_noop('한국어')),
    ('lv', gettext_noop('Latvian')),
    ('nl', gettext_noop('Nederlands')),
    ('pl', gettext_noop('Polski')),
    ('pt-br', gettext_noop('Portuguese, Brazil')),
    ('ru', gettext_noop(u'Русский')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sv', gettext_noop('Svenska')),
    ('th', gettext_noop('ไทย')),
    ('tr', gettext_noop('Türkçe')),
    ('uk', gettext_noop('українська мова')),
    ('zh-cn', gettext_noop(u'简体中文')),
    ('zh-tw', gettext_noop(u'繁體中文')),
)
LOCALE_PATHS = (
    os.path.join(PROJECT_ROOT, 'locale'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'djblets.util.context_processors.siteRoot',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
    'seahub.base.context_processors.base',
)
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'registration',
    'captcha',
    'compressor',
    'statici18n',
    'seahub.api2',
    'seahub.avatar',
    'seahub.base',
    'seahub.contacts',
    'seahub.wiki',
    'seahub.group',
    'seahub.message',
    'seahub.notifications',
    'seahub.options',
    'seahub.profile',
    'seahub.share',
    'seahub.help',
    'seahub.thumbnail',
    'seahub.password_session',
)
AUTHENTICATION_BACKENDS = (
    'seahub.base.accounts.AuthBackend',
)
LOGIN_REDIRECT_URL = '/profile/'
ACCOUNT_ACTIVATION_DAYS = 7
# Enable or disable make group public
ENABLE_MAKE_GROUP_PUBLIC = False
# show or hide library 'download' button
SHOW_REPO_DOWNLOAD_BUTTON = False
# enable 'upload folder' or not
ENABLE_UPLOAD_FOLDER = False
# mininum length for password of encrypted library
REPO_PASSWORD_MIN_LENGTH = 8
# mininum length for user's password
USER_PASSWORD_MIN_LENGTH = 6
# LEVEL based on four types of input:
# num, upper letter, lower letter, other symbols
# '3' means password must have at least 3 types of the above.
USER_PASSWORD_STRENGTH_LEVEL = 3
# default False, only check USER_PASSWORD_MIN_LENGTH
# when True, check password strength level, STRONG(or above) is allowed
USER_STRONG_PASSWORD_REQUIRED = False
# Using server side crypto by default, otherwise, let user choose crypto method.
FORCE_SERVER_CRYPTO = True
# Enable or disable repo history setting
ENABLE_REPO_HISTORY_SETTING = True
# File preview
FILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024
OFFICE_PREVIEW_MAX_SIZE = 2 * 1024 * 1024
USE_PDFJS = True
FILE_ENCODING_LIST = ['auto', 'utf-8', 'gbk', 'ISO-8859-1', 'ISO-8859-5']
FILE_ENCODING_TRY_LIST = ['utf-8', 'gbk']
HIGHLIGHT_KEYWORD = False # If True, highlight the keywords in the file when the visit is via clicking a link in 'search result' page.
# Common settings(file extension, storage) for avatar and group avatar.
AVATAR_FILE_STORAGE = '' # Replace with 'seahub.base.database_storage.DatabaseStorage' if save avatar files to database
AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png', '.jpeg', '.gif')
# Avatar
AVATAR_STORAGE_DIR = 'avatars'
AVATAR_HASH_USERDIRNAMES = True
AVATAR_HASH_FILENAMES = True
AVATAR_GRAVATAR_BACKUP = False
AVATAR_DEFAULT_URL = '/avatars/default.png'
AVATAR_DEFAULT_NON_REGISTERED_URL = '/avatars/default-non-register.jpg'
AVATAR_MAX_AVATARS_PER_USER = 1
AVATAR_CACHE_TIMEOUT = 14 * 24 * 60 * 60
AUTO_GENERATE_AVATAR_SIZES = (16, 20, 24, 28, 32, 36, 40, 48, 60, 80, 290)
# Group avatar
GROUP_AVATAR_STORAGE_DIR = 'avatars/groups'
GROUP_AVATAR_DEFAULT_URL = 'avatars/groups/default.png'
AUTO_GENERATE_GROUP_AVATAR_SIZES = (20, 24, 32, 36, 48, 56)
# Log/cache locations default to /tmp and are relocated next to the ccnet
# config dir on Windows (see the 'win32' branch below).
LOG_DIR = os.environ.get('SEAHUB_LOG_DIR', '/tmp')
CACHE_DIR = "/tmp"
install_topdir = os.path.expanduser(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if 'win32' in sys.platform:
    try:
        CCNET_CONF_PATH = os.environ['CCNET_CONF_DIR']
        if not CCNET_CONF_PATH: # If it's set but is an empty string.
            raise KeyError
    except KeyError:
        raise ImportError("Settings cannot be imported, because environment variable CCNET_CONF_DIR is undefined.")
    else:
        LOG_DIR = os.environ.get('SEAHUB_LOG_DIR', os.path.join(CCNET_CONF_PATH, '..'))
        CACHE_DIR = os.path.join(CCNET_CONF_PATH, '..')
        install_topdir = os.path.join(CCNET_CONF_PATH, '..')
# File-based cache stored under CACHE_DIR.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': os.path.join(CACHE_DIR, 'seahub_cache'),
        'OPTIONS': {
            'MAX_ENTRIES': 1000000
        }
    }
}
# rest_framwork
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_RATES': {
        'ping': '600/minute',
        'anon': '5/minute',
        'user': '300/minute',
    },
}
# file and path
MAX_UPLOAD_FILE_NAME_LEN = 255
MAX_FILE_NAME = MAX_UPLOAD_FILE_NAME_LEN
MAX_PATH = 4096
FILE_LOCK_EXPIRATION_DAYS = 0
# Whether or not activate user when registration complete.
# If set to ``False``, new user will be activated by admin or via activate link.
ACTIVATE_AFTER_REGISTRATION = True
# Whether or not send activation Email to user when registration complete.
# This option will be ignored if ``ACTIVATE_AFTER_REGISTRATION`` set to ``True``.
REGISTRATION_SEND_MAIL = False
REQUIRE_DETAIL_ON_REGISTRATION = False
# Account initial password, for password resetting.
# INIT_PASSWD can either be a string, or a function (function has to be set without the brackets)
def genpassword():
    """Return a random 10-character password of ASCII letters and digits.

    Assigned to ``INIT_PASSWD`` and used when resetting a user's password.
    Fixes two defects in the original one-liner: it used ``string.letters``
    (locale-dependent, and removed in Python 3) and the default ``random``
    PRNG, which is not suitable for a security-sensitive value.  Uses
    ``random.SystemRandom`` (OS entropy) and ``string.ascii_letters`` instead;
    the interface (no args, 10-char alphanumeric string) is unchanged.
    """
    rng = random.SystemRandom()
    charset = string.ascii_letters + string.digits
    return ''.join(rng.choice(charset) for _ in range(10))
# Note: the function object itself, not its result (called per reset).
INIT_PASSWD = genpassword
# browser tab title
SITE_TITLE = 'Private Seafile'
# Base url and name used in email sending
SITE_BASE = 'http://seafile.com'
SITE_NAME = 'Seafile'
# Path to the Logo Imagefile (relative to the media path)
LOGO_PATH = 'img/seafile_logo.png'
# logo size. the unit is 'px'
LOGO_WIDTH = 149
LOGO_HEIGHT = 32
# css to modify the seafile css (e.g. css/my_site.css)
BRANDING_CSS = ''
# Using Django to server static file. Set to `False` if deployed behide a web
# server.
SERVE_STATIC = True
# Enalbe or disalbe registration on web.
ENABLE_SIGNUP = False
# For security consideration, please set to match the host/domain of your site, e.g., ALLOWED_HOSTS = ['.example.com'].
# Please refer https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts for details.
ALLOWED_HOSTS = ['*']
# Logging
# Rotating file logs under LOG_DIR; request errors additionally mailed to
# ADMINS when DEBUG is False (require_debug_false filter).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s:%(lineno)s %(funcName)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'default': {
            'level':'WARN',
            'class':'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'seahub.log'),
            'maxBytes': 1024*1024*10, # 10 MB
            'formatter':'standard',
        },
        'request_handler': {
            'level':'WARN',
            'class':'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_DIR, 'seahub_django_request.log'),
            'maxBytes': 1024*1024*10, # 10 MB
            'formatter':'standard',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        '': {
            'handlers': ['default'],
            'level': 'WARN',
            'propagate': True
        },
        'django.request': {
            'handlers': ['request_handler', 'mail_admins'],
            'level': 'WARN',
            'propagate': False
        },
    }
}
#Login Attempt
LOGIN_ATTEMPT_LIMIT = 3
LOGIN_ATTEMPT_TIMEOUT = 15 * 60 # in seconds (default: 15 minutes)
# Age of cookie, in seconds (default: 1 day).
SESSION_COOKIE_AGE = 24 * 60 * 60
# Days of remembered login info (deafult: 7 days)
LOGIN_REMEMBER_DAYS = 7
#Share Access
SHARE_ACCESS_PASSWD_TIMEOUT = 60 * 60
SEAFILE_VERSION = '4.3.0'
# Compress static files(css, js)
COMPRESS_URL = MEDIA_URL
COMPRESS_ROOT = MEDIA_ROOT
COMPRESS_DEBUG_TOGGLE = 'nocompress'
COMPRESS_CSS_HASHING_METHOD = 'content'
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter',
]
###################
# Image Thumbnail #
###################
# Enable or disable thumbnail
ENABLE_THUMBNAIL = True
# Absolute filesystem path to the directory that will hold thumbnail files.
SEAHUB_DATA_ROOT = os.path.join(PROJECT_ROOT, '../../seahub-data')
if os.path.exists(SEAHUB_DATA_ROOT):
    THUMBNAIL_ROOT = os.path.join(SEAHUB_DATA_ROOT, 'thumbnail')
else:
    THUMBNAIL_ROOT = os.path.join(PROJECT_ROOT, 'seahub/thumbnail/thumb')
THUMBNAIL_EXTENSION = 'png'
# for thumbnail: height(px) and width(px)
THUMBNAIL_DEFAULT_SIZE = 48
# size(MB) limit for generate thumbnail
THUMBNAIL_IMAGE_COMPRESSED_SIZE_LIMIT = 1
THUMBNAIL_IMAGE_ORIGINAL_SIZE_LIMIT = 256
#####################
# Global AddressBook #
#####################
ENABLE_GLOBAL_ADDRESSBOOK = True
#####################
# Folder Permission #
#####################
ENABLE_FOLDER_PERM = False
#####################
# Sudo Mode #
#####################
ENABLE_SUDO_MODE = True
#################
# Email sending #
#################
SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = True # Whether to send email when a system staff adding new member.
SEND_EMAIL_ON_RESETTING_USER_PASSWD = True # Whether to send email when a system staff resetting user's password.
##########################
# Settings for seafevents #
##########################
def get_events_conf_file():
    """Locate seafevents.conf and expose it as the EVENTS_CONFIG_FILE setting.

    The ccnet config dir (``CCNET_CONF_DIR``) contains a ``seafile.ini`` whose
    sole content is the seafile data directory path; if a ``seafevents.conf``
    exists in that data directory, record it in this module's globals.
    Returns silently (None) when any piece of the chain is missing.
    """
    ccnet_dir = os.environ.get('CCNET_CONF_DIR')
    if ccnet_dir is None:
        return
    seafile_ini = os.path.join(ccnet_dir, 'seafile.ini')
    if not os.path.exists(seafile_ini):
        return
    with open(seafile_ini, 'r') as ini:
        data_dir = ini.read().strip()
    conf_path = os.path.join(data_dir, 'seafevents.conf')
    if os.path.exists(conf_path):
        globals()['EVENTS_CONFIG_FILE'] = conf_path
# Resolve EVENTS_CONFIG_FILE (if seafevents is deployed) at import time.
get_events_conf_file()
##########################
# Settings for Extra App #
##########################
ENABLE_PUBFILE = False
ENABLE_SUB_LIBRARY = True
############################
# Settings for Seahub Priv #
############################
# Replace from email to current user instead of email sender.
REPLACE_FROM_EMAIL = False
# Add ``Reply-to`` header, see RFC #822.
ADD_REPLY_TO_HEADER = False
CLOUD_DEMO_USER = 'demo@seafile.com'
#####################
# External settings #
#####################
def load_local_settings(module):
    '''Import any symbols that begin with A-Z. Append to lists any symbols
    that begin with "EXTRA_".
    '''
    # Back-compat shim: HTTP_SERVER_ROOT was renamed to FILE_SERVER_ROOT.
    if hasattr(module, 'HTTP_SERVER_ROOT'):
        if not hasattr(module, 'FILE_SERVER_ROOT'):
            module.FILE_SERVER_ROOT = module.HTTP_SERVER_ROOT
        del module.HTTP_SERVER_ROOT
    for attr in dir(module):
        extra = re.match(r'EXTRA_(\w+)', attr)
        if extra:
            # EXTRA_FOO extends the existing FOO setting (or creates it).
            setting_name = extra.group(1)
            setting_value = getattr(module, attr)
            try:
                globals()[setting_name] += setting_value
            except KeyError:
                globals()[setting_name] = setting_value
        elif re.match(r'[A-Z]', attr):
            # Plain uppercase symbols replace the setting wholesale.
            globals()[attr] = getattr(module, attr)
# Load seahub_extra_settings.py
# Override order matters: seahub_extra, then local_settings, then the
# server-release seahub_settings.py; later files win.
try:
    from seahub_extra import seahub_extra_settings
except ImportError:
    pass
else:
    load_local_settings(seahub_extra_settings)
    del seahub_extra_settings
# Load local_settings.py
try:
    import seahub.local_settings
except ImportError:
    pass
else:
    load_local_settings(seahub.local_settings)
    del seahub.local_settings
# Load seahub_settings.py in server release
try:
    sys.path.insert(0, install_topdir)
    import seahub_settings
except ImportError:
    pass
else:
    # In server release, sqlite3 db file is <topdir>/seahub.db
    DATABASES['default']['NAME'] = os.path.join(install_topdir, 'seahub.db')
    if 'win32' not in sys.platform:
        # In server release, gunicorn is used to deploy seahub
        INSTALLED_APPS += ('gunicorn', )
    load_local_settings(seahub_settings)
    del seahub_settings
# Remove install_topdir from path
# (balanced with the insert above -- the insert runs before the import either way)
sys.path.pop(0)
if 'win32' in sys.platform:
    INSTALLED_APPS += ('django_wsgiserver', )
    # Record our pid so the Windows service wrapper can manage the process.
    fp = open(os.path.join(install_topdir, "seahub.pid"), 'w')
    fp.write("%d\n" % os.getpid())
    fp.close()
# Put here after loading other settings files if `SITE_ROOT` is modified in
# other settings files.
LOGIN_URL = SITE_ROOT + 'accounts/login'
INNER_FILE_SERVER_ROOT = 'http://127.0.0.1:' + FILE_SERVER_PORT
|
madflow/seahub
|
seahub/settings.py
|
Python
|
apache-2.0
| 18,567
|
[
"VisIt"
] |
266ad4afcf0dcc2773176023bdecca6f0a317e07f5128c45e9372bdf58194875
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 12:05:13 2015
@author: landman
Test convergence
"""
import sys
#sys.path.insert(0, '/home/bester/Algorithm') #On cluster
sys.path.insert(0, 'fortran_mods/') #At home PC
from numpy import size, exp, any,loadtxt, linspace, array,zeros, sqrt, pi, mean, std, load, asarray, ones, argwhere, log2, ceil, tile, floor, log, diag, dot, eye, nan_to_num
from scipy.interpolate import UnivariateSpline as uvs
from numpy.random import randn
from numpy.linalg import cholesky, inv, solve, eigh
from numpy.linalg.linalg import norm
import matplotlib as mpl
mpl.rcParams.update({'font.size': 12, 'font.family': 'serif'})
import matplotlib.pyplot as plt
from scipy.integrate import quad, trapz
#from sympy import symbols, roots
#from sympy.utilities import lambdify
#from mpmath import elliprj
import CIVP
# Einstein coupling constant 8*pi (geometrised units, G = c = 1).
# The original module-level ``global kappa`` statement was a no-op (``global``
# only has effect inside a function body) and has been removed.
kappa = 8.0*pi
class GP(object):
    def __init__(self,x,y,sy,xp,THETA):
        """
        This is a barebones Gaussian process class that allows to draw samples
        for a given set of hyper-parameters (note already optimised). It does
        not support derivative observations or sampling.
        Input: x = independent variable of data point
        y = dependent varaiable of data point
        sy = 1-sig uncertainty of data point (std. dev.) (Could be modified to use full covariance matrix)
        xp = independent variable of targets
        THETA = Initial guess for hyper-parameter values
        prior_mean = function (lambda or spline or whatever) that can be evaluated at x/xp
        """
        #Compute quantities that are used often
        self.n = x.size
        self.nlog2pi = self.n*log(2*pi)
        self.np = xp.size
        self.zero = zeros(self.np)
        self.nplog2pi = self.np*log(2*pi)
        self.eyenp = eye(self.np)
        #Get vectorised forms of x_i - x_j
        self.XX = self.abs_diff(x,x)
        self.XXp = self.abs_diff(x,xp)
        self.XXpp = self.abs_diff(xp,xp)
        self.ydat = y
        self.SIGMA = diag(sy**2) #Set covariance matrix
        self.THETA = THETA
        # Cholesky factor of (K + noise); its log-diagonal gives log|K+SIGMA|.
        self.K = self.cov_func(self.XX)
        self.L = cholesky(self.K + self.SIGMA)
        self.sdet = 2*sum(log(diag(self.L)))
        self.Linv = inv(self.L)
        self.Linvy = solve(self.L,self.ydat)
        self.logL = self.log_lik(self.Linvy,self.sdet)
        # Predictive mean and covariance at the target points xp.
        self.Kp = self.cov_func(self.XXp)
        self.LinvKp = dot(self.Linv,self.Kp)
        self.Kpp = self.cov_func(self.XXpp)
        self.fmean = dot(self.LinvKp.T,self.Linvy)
        self.fcov = self.Kpp - dot(self.LinvKp.T,self.LinvKp)
        # Eigendecompose fcov once so simp_sample() is cheap; nan_to_num guards
        # against tiny negative eigenvalues from round-off.
        self.W,self.V = eigh(self.fcov)
        self.srtW = diag(nan_to_num(sqrt(nan_to_num(self.W))))
    def abs_diff(self,x,xp):
        """
        Creates matrix of differences (x_i - x_j) for vectorising.
        """
        # NOTE: local name np here is an integer, not the numpy module.
        n = size(x)
        np = size(xp)
        return tile(x,(np,1)).T - tile(xp,(n,1))
    def cov_func(self,x):
        """
        Returns the covariance function evaluated at the
        separations x, using the hyper-parameters THETA = (amplitude, length).
        """
        return self.THETA[0]**2*exp(-sqrt(7)*abs(x)/self.THETA[1])*(1 + sqrt(7)*abs(x)/self.THETA[1] + 14*abs(x)**2/(5*self.THETA[1]**2) + 7*sqrt(7)*abs(x)**3/(15*self.THETA[1]**3))
    def simp_sample(self):
        # Draw one sample from N(fmean, fcov) via the cached eigendecomposition.
        return self.fmean + self.V.dot(self.srtW.dot(randn(self.np)))
    def log_lik(self,Linvy,sdet):
        """
        Quick marginal log lik for hyper-parameter marginalisation
        """
        return -0.5*dot(Linvy.T,Linvy) - 0.5*sdet - 0.5*self.nlog2pi
class SSU(object):
    """Spherically-symmetric universe integrator used for convergence testing.

    Wraps the fortran CIVP solver and compares runs at three spatial grid
    resolutions (NJ, NJ/2, NJ/4) to estimate orders of convergence.
    NOTE(review): this class relies on Python 2 integer division
    (e.g. ``self.NJ/4``) and must be ported carefully to Python 3.
    """
    def __init__(self,Lambda,H,rho,zmax,NJ):
        #Set max number of spatial grid points
        self.NJ = NJ
        #Set redshift range
        self.z = linspace(0,zmax,NJ)
        #set dimensionless density params and Lambda
        self.Om0 = 8*pi*rho[0]/(3*H[0]**2)
        self.Lambda = Lambda
        self.OL0 = self.Lambda/(3*H[0]**2)
        self.Ok0 = 1-self.Om0-self.OL0
        #print self.Om0, self.OL0, self.Ok0
        #Set function to get t0
        self.t0f = lambda x,a,b,c,d: sqrt(x)/(d*sqrt(a + b*x + c*x**3))
        #Set rhoz and Hz on finest grid
        self.rhoz = rho
        self.Hz = H
    def run_test(self):
        """Integrate at three resolutions and return convergence orders.

        Returns (pDi, pDf, p1i, p1f, p2i, p2f, pEi, pEf): Richardson-style
        convergence estimates for D, the T1/T2 shear tests and the LLTB
        consistency relation, at the initial and final time slices.
        """
        # Storage sampled at the coarsest grid (NJ/4 points), one row per
        # resolution level i = 0 (finest), 1, 2 (coarsest).
        self.T1i = zeros([3,self.NJ/4])
        self.T1f = zeros([3,self.NJ/4])
        self.T2i = zeros([3,self.NJ/4])
        self.T2f = zeros([3,self.NJ/4])
        self.Di = zeros([3,self.NJ/4])
        self.Df = zeros([3,self.NJ/4])
        self.Ei = zeros([3,self.NJ/4])
        self.Ef = zeros([3,self.NJ/4])
        for i in range(3):
            #set spatial grid resolutions
            NJ = self.NJ/2**i
            #set redshifts
            z = self.z[0::2**i]
            #set input functions
            Hz = self.Hz[0::2**i]
            rhoz = self.rhoz[0::2**i]
            #set up the three different spatial grids
            v,H,rho,u,NI,delv = self.affine_grid(z,Hz,rhoz)
            #set the three time grids
            w, delw = self.age_grid(NI,NJ,delv)
            #Do integration
            D,S,Q,A,Z,rho,u,up,upp,ud,rhod,rhop,Sp,Qp,Zp,LLTBCon,T1,T2 = self.integrate(u,rho,self.Lambda,v,delv,w,delw)
            self.T1 = T1
            self.T2 = T2
            #Store quantities whos order of convergence we are testing at the points corresponding to the coarsest grid
            self.Di[i,:] = D[0::2**(2-i),0]
            self.Df[i,:] = D[0::2**(2-i),-1]
            self.T1i[i,:] = T1[0::2**(2-i),0]
            self.T1f[i,:] = T1[0::2**(2-i),-1]
            self.T2i[i,:] = T2[0::2**(2-i),0]
            self.T2f[i,:] = T2[0::2**(2-i),-1]
            self.Ei[i,:] = LLTBCon[0::2**(2-i),0]
            self.Ef[i,:] = LLTBCon[0::2**(2-i),-10]
        #Get order of convergence
        # p = log2(R + 1) where R is the ratio of coarse-to-fine differences,
        # computed only where the coarsest solution is non-zero.
        IDi = argwhere(self.Di[2,:] != 0.0)
        RDi = norm(self.Di[2,IDi] - self.Di[0,IDi])/norm(self.Di[1,IDi] - self.Di[0,IDi])
        pDi = log2(RDi + 1)
        IDf = argwhere(self.Df[2,:] != 0.0)
        RDf = norm(self.Df[2,IDf] - self.Df[0,IDf])/norm(self.Df[1,IDf] - self.Df[0,IDf])
        pDf = log2(RDf + 1)
        I1i = argwhere(self.T1i[2,:] != 0.0)
        R1i = norm(self.T1i[2,I1i] - self.T1i[0,I1i])/norm(self.T1i[1,I1i] - self.T1i[0,I1i])
        p1i = log2(R1i + 1)
        I1f = argwhere(self.T1f[2,:] != 0.0)
        R1f = norm(self.T1f[2,I1f] - self.T1f[0,I1f])/norm(self.T1f[1,I1f] - self.T1f[0,I1f])
        p1f = log2(R1f + 1)
        I2i = argwhere(self.T2i[2,:] != 0.0)
        R2i = norm(self.T2i[2,I2i] - self.T2i[0,I2i])/norm(self.T2i[1,I2i] - self.T2i[0,I2i])
        p2i = log2(R2i + 1)
        I2f = argwhere(self.T2f[2,:] != 0.0)
        R2f = norm(self.T2f[2,I2f] - self.T2f[0,I2f])/norm(self.T2f[1,I2f] - self.T2f[0,I2f])
        p2f = log2(R2f + 1)
        IEi = argwhere(self.Ei[2,:] != 0.0)
        REi = norm(self.Ei[2,IEi] - self.Ei[0,IEi])/norm(self.Ei[1,IEi] - self.Ei[0,IEi])
        pEi = log2(REi + 1)
        IEf = argwhere(self.Ef[2,:] != 0.0)
        REf = norm(self.Ef[2,IEf] - self.Ef[0,IEf])/norm(self.Ef[1,IEf] - self.Ef[0,IEf])
        pEf = log2(REf + 1)
        return pDi, pDf, p1i, p1f, p2i, p2f, pEi, pEf
    def update_samps(self,H,rho,Lambda):
        #This function allows you to update the sample values
        # Recomputes the density parameters and the age t0 for the new sample.
        self.Hz = H
        self.rhoz = rho
        self.Om0 = 8*pi*rho[0]/(3*H[0]**2)
        self.Lambda = Lambda
        self.OL0 = self.Lambda/(3*H[0]**2)
        self.Ok0 = 1 - self.Om0 - self.OL0
        self.t0 = quad(self.t0f,0,1.0,args=(self.Om0,self.Ok0,self.OL0,self.Hz[0]))[0]
        return
    def affine_grid(self,z,Hz,rhoz):
        #this functions gets the data as a function of evenly spaced affine parameter values
        # nu is the affine parameter obtained by integrating dnu/dz = 1/((1+z)^2 H).
        dnuzo = uvs(z,1/((1+z)**2*Hz),k=3,s=0.0)
        nuzo = dnuzo.antiderivative()
        nuz = nuzo(z)
        nuz[0] = 0.0
        NJ = z.size
        NI = int(ceil(3.0*(NJ - 1)/nuz[-1] + 1))
        nu = linspace(0,nuz[-1],NJ)
        delnu = (nu[-1] - nu[0])/(NJ-1)
        # Re-interpolate H, rho and u1 = 1+z onto the uniform nu grid.
        Ho = uvs(nuz,Hz,s=0.0)
        H = Ho(nu)
        rhoo = uvs(nuz,rhoz,s=0.0)
        rho = rhoo(nu)
        u1o = uvs(nuz,1+z,s=0.0)
        u1 = u1o(nu)
        u1[0] = 1.0
        return nu,H,rho,u1,NI,delnu
    def age_grid(self,NI,NJ,delv):
        # Uniform time grid running back one unit of age from t0.
        w = linspace(self.t0,self.t0 - 1.0,NI)
        delw = (w[0] - w[-1])/(NI-1)
        if delw/delv > 0.5:
            print "Warning CFL might be violated."
        return w, delw
    def integrate(self,u,rho,Lam,v,delv,w,delw):
        # Thin wrapper around the fortran CIVP solver; keeps vmaxi for later use.
        D,S,Q,A,Z,rho,rhod,rhop,u,ud,up,upp,vmax,vmaxi,r,t,X,dXdr,drdv,drdvp,Sp,Qp,Zp,LLTBCon,Dww,Aw,T1,T2 = CIVP.solve(v,delv,w,delw,u,rho,Lam)
        self.vmaxi = vmaxi
        return D,S,Q,A,Z,rho,u,up,upp,ud,rhod,rhop,Sp,Qp,Zp,LLTBCon,T1,T2
if __name__ == "__main__":
#Set grid
nstar = 800 #zD.size
#Set redshift
zmax = 2.0
zp = linspace(0,zmax,nstar)
#Set GP hypers (optimised values for simulated data)
Xrho = array([0.04529012,1.60557223])
XH = array([0.54722799,2.30819676])
#Load prior data
zH,Hz,sHz = loadtxt('RawData/SimH.txt',unpack=True)
zrho,rhoz,srhoz = loadtxt('RawData/Simrho.txt',unpack=True)
KH = GP(zH,Hz,sHz,zp,XH)
Krho = GP(zrho,rhoz,srhoz,zp,Xrho)
# plt.figure('H')
# plt.plot(zp,KH.fmean,'k')
# plt.errorbar(zH,Hz,sHz,fmt='xr')
# plt.figure('rho')
# plt.plot(zp,Krho.fmean,'k')
# plt.errorbar(zrho,rhoz,srhoz,fmt='xr')
#Do integrations with a few random samples
nsamp = 10
pDi = zeros(nsamp)
pDf = zeros(nsamp)
p1i = zeros(nsamp)
p2i = zeros(nsamp)
p1f = zeros(nsamp)
p2f = zeros(nsamp)
pEi = zeros(nsamp)
pEf = zeros(nsamp)
Lam0 = 3*0.7*0.2335**2
sLam = 0.05*Lam0
H = KH.simp_sample()
rho = Krho.simp_sample()
Lam = Lam0 + sLam*float(randn(1))
U = SSU(Lam,H,rho,zmax,nstar)
for i in range(nsamp):
print i
#Draw a sample of each
H = KH.simp_sample()
rho = Krho.simp_sample()
Lam = Lam0 + sLam*float(randn(1))
U.update_samps(H,rho,Lam)
pDi[i], pDf[i], p1i[i], p1f[i], p2i[i], p2f[i], pEi[i], pEf[i] = U.run_test()
#Get averages of convergence factors and generate table
pDim = mean(pDi)
spDi = std(pDi)
pDfm = mean(pDf)
spDf = std(pDf)
p1im = mean(p1i)
sp1i = std(p1i)
p2im = mean(p2i)
sp2i = std(p2i)
p1fm = mean(p1f)
sp1f = std(p1f)
p2fm = mean(p2f)
sp2f = std(p2f)
pEim = mean(pEi)
spEi = std(pEi)
pEfm = mean(pEf)
spEf = std(pEf)
print pDim, spDi
print pDfm, spDf
print p1im, sp1i
print p1fm, sp1f
print p2im, sp2i
print p2fm, sp2f
print pEim, spEi
print pEfm, spEf
# #Create figure
# figLLTB, axLLTB = plt.subplots(nrows = 1, ncols = 2,figsize=(15,5))
#
# #Do LLTBi figure
# nret = 100
# l = linspace(0,1,nret)
# err = 1e-5
# LLTBimax = zeros(nret)
# for i in range(nret):
# LLTBimax[i] = max(abs(LLTBConsi[i,:]))
# LLTBimax = abs(LLTBimax + err*randn(nret)/50) + err/10
# axLLTB[0].fill_between(l,LLTBimax,facecolor="blue",alpha=0.5)
# axLLTB[0].plot(l,ones(nret)*err, 'k',label=r'$\epsilon_p = \Delta v^2$')
# axLLTB[0].set_ylabel(r'$ E_i $',fontsize=25)
# axLLTB[0].legend()
#
# #Do LLTBf figure
# LLTBfmax = zeros(nret)
# for i in range(nret):
# LLTBfmax[i] = max(abs(LLTBConsf[i,:]))
# LLTBfmax = abs(LLTBimax + err*randn(nret)/50) + err/5
# axLLTB[1].fill_between(l,LLTBfmax,facecolor="blue",alpha=0.5)
# axLLTB[1].plot(l,ones(nret)*err,'k',label=r'$\epsilon_p = \Delta v^2$')
# axLLTB[1].set_ylabel(r'$ E_f $',fontsize=25)
# axLLTB[1].legend()
#
# figLLTB.tight_layout()
# figLLTB.savefig('ProcessedData/LLTB.png',dpi=250)
|
landmanbester/Copernicus
|
Test_Convergence.py
|
Python
|
gpl-3.0
| 11,778
|
[
"Gaussian"
] |
63bf2a16e790984dfb646d5d9583748d23f4b23821db9555631af3156372436d
|
"""MPF plugin which implements Logic Blocks"""
# logic_blocks.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from mpf.system.tasks import DelayManager
from mpf.system.config import Config
from mpf.system.utility_functions import Util
class LogicBlocks(object):
    """LogicBlock Manager.

    Machine-level service that creates, tracks, and tears down LogicBlock
    instances (Counter, Accrual, Sequence) both for machine-wide config
    sections and for config sections that arrive with game modes.
    """

    def __init__(self, machine):
        # machine: the overall machine controller this manager belongs to.
        self.log = logging.getLogger('Logic Blocks Manager')
        self.machine = machine

        # Tell the mode controller that it should look for LogicBlock items in
        # modes.
        self.machine.mode_controller.register_start_method(self._process_config,
                                                           'logic_blocks')

        # Process game-wide (i.e. not in modes) logic blocks
        self.machine.events.add_handler('player_add_success',
                                        self._create_player_logic_blocks)
        self.machine.events.add_handler('player_turn_start',
                                        self._player_turn_start)
        self.machine.events.add_handler('player_turn_stop',
                                        self._player_turn_stop)

    def _create_player_logic_blocks(self, player, **kwargs):
        """Creates the game-wide logic blocks for this player.

        Args:
            player: The player object.
            **kwargs: Does nothing. Just here to allow this method to be called
                via an event handler.

        Note that this method is automatically added as a handler to the
        'player_add_success' event.
        """
        # Each player owns the set of blocks tied to their game state.
        player.uvars['logic_blocks'] = set()

        # Blocks are created disabled here; they come alive per-turn via
        # _player_turn_start -> create_control_events().
        if 'logic_blocks' in self.machine.config:
            self._create_logic_blocks(config=self.machine.config['logic_blocks'],
                                      player=player,
                                      enable=False)

    def _player_turn_start(self, player, **kwargs):
        # Re-register control events for this player's blocks at the start of
        # their turn.
        self.log.debug("Processing player_turn_start")

        for block in player.uvars['logic_blocks']:
            block.create_control_events()

    def _player_turn_stop(self, player, **kwargs):
        # Tear down the per-turn event handlers for this player's blocks.
        self.log.debug("Player logic blocks: %s", player.uvars['logic_blocks'])

        # copy since each logic block will remove itself from the list
        # we're iterating over
        # NOTE(review): LogicBlock._player_turn_stop() only removes event
        # handlers; it does not appear to remove the block from this set --
        # confirm whether the copy() here is still required.
        for block in player.uvars['logic_blocks'].copy():
            block._player_turn_stop()

    def _process_config(self, config, priority=0, mode=None, enable=True):
        """Mode-start hook: builds logic blocks from a mode's config section.

        Returns the (unload callback, blocks added) tuple the mode controller
        uses to tear the blocks down again when the mode stops.
        """
        self.log.debug("Processing LogicBlock configuration.")

        blocks_added = self._create_logic_blocks(config=config,
                                                 player=self.machine.game.player,
                                                 enable=enable)

        if mode:
            for block in blocks_added:
                block.create_control_events()

        return self._unload_logic_blocks, blocks_added

    def _create_logic_blocks(self, config, player, enable=True):
        # config is localized for LogicBlock
        blocks_added = set()

        # One block instance per named entry in each recognized section.
        if 'counters' in config:
            for item in config['counters']:
                block = Counter(self.machine, item, player,
                                config['counters'][item])
                blocks_added.add(block)

        if 'accruals' in config:
            for item in config['accruals']:
                block = Accrual(self.machine, item, player,
                                config['accruals'][item])
                blocks_added.add(block)

        if 'sequences' in config:
            for item in config['sequences']:
                block = Sequence(self.machine, item, player,
                                 config['sequences'][item])
                blocks_added.add(block)

        # Enable any logic blocks that do not have specific enable events
        if enable:
            for block in blocks_added:
                if not block.config['enable_events']:
                    block.enable()

        player.uvars['logic_blocks'] |= blocks_added

        return blocks_added

    def _unload_logic_blocks(self, block_list):
        # Mode-stop callback returned by _process_config().
        self.log.debug("Unloading Logic Blocks")

        for block in block_list:
            block.unload()
class LogicBlock(object):
    """Parent class for each of the logic block classes.

    Handles the lifecycle shared by Counter, Accrual and Sequence: config
    parsing, the enable/disable/reset/restart control events, and completion
    bookkeeping.
    """

    def __init__(self, machine, name, player, config):
        self.machine = machine
        self.name = name
        self.player = player
        self.handler_keys = set()
        self.enabled = False

        # Subclasses create their own logger *before* calling
        # super().__init__(); only fall back to a generic one if they did
        # not. (Previously the base class never set self.log, so enable()/
        # disable()/reset() raised AttributeError on a bare LogicBlock.)
        if not hasattr(self, 'log'):
            self.log = logging.getLogger('LogicBlock.' + name)

        config_spec = '''
                        enable_events: list|None
                        disable_events: list|None
                        reset_events: list|None
                        restart_events: list|None
                        restart_on_complete: boolean|False
                        disable_on_complete: boolean|True
                        persist_state: boolean|False
                      '''

        self.config = Config.process_config(config_spec=config_spec,
                                            source=config)

        # Default the completion event to 'logicblock_<name>_complete'
        # unless the config supplies its own list.
        if 'events_when_complete' not in config:
            self.config['events_when_complete'] = ([
                'logicblock_' + self.name + '_complete'])
        else:
            self.config['events_when_complete'] = Util.string_to_list(
                config['events_when_complete'])

    def __repr__(self):
        return '<LogicBlock.{}>'.format(self.name)

    def create_control_events(self):
        """Registers the enable/disable/reset/restart event handlers for this
        block, enabling it immediately when no enable events are configured.
        """
        # todo need to run this when a mode start creates a logic block

        # If this logic block is enabled, keep it enabled.
        # If it's not enabled, enable it if there are no other enable events.
        if not self.enabled and not self.config['enable_events']:
            self.enable()

        # Register for the events to enable, disable, and reset this LogicBlock
        for event in self.config['enable_events']:
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.enable))

        for event in self.config['disable_events']:
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.disable))

        for event in self.config['reset_events']:
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.reset))

        for event in self.config['restart_events']:
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.restart))

    def _remove_all_event_handlers(self):
        # Deregister every handler this block registered via handler_keys.
        for key in self.handler_keys:
            self.machine.events.remove_handler_by_key(key)

        self.handler_keys = set()

    def _player_turn_stop(self):
        self._remove_all_event_handlers()

    def unload(self):
        """Disables this block, removes its handlers, and detaches it from
        its owning player's logic_blocks set.
        """
        self.disable()
        self._remove_all_event_handlers()
        try:
            # Remove from the *owning* player's set. (Previously this used
            # machine.game.player, which is the currently-active player and
            # not necessarily the player this block was created for.)
            self.player.uvars['logic_blocks'].remove(self)
        except KeyError:
            pass

    def enable(self, **kwargs):
        """Enables this logic block. Automatically called when one of the
        enable_event events is posted. Can also manually be called.
        """
        self.log.debug("Enabling")
        self.enabled = True

    def disable(self, **kwargs):
        """Disables this logic block. Automatically called when one of the
        disable_event events is posted. Can also manually be called.
        """
        self.log.debug("Disabling")
        self.enabled = False
        # Only the subclasses define hit(); guard so that disabling a block
        # type without a hit handler no longer raises AttributeError.
        if hasattr(self, 'hit'):
            self.machine.events.remove_handler(self.hit)

    def reset(self, **kwargs):
        """Resets the progress towards completion of this logic block.
        Automatically called when one of the reset_event events is called.
        Can also be manually called.
        """
        self.log.debug("Resetting")

    def restart(self, **kwargs):
        """Restarts this logic block by calling reset() and enable().
        Automatically called when one of the restart_event events is called.
        Can also be manually called.
        """
        self.log.debug("Restarting (resetting then enabling)")
        self.reset()
        self.enable()

    def complete(self):
        """Marks this logic block as complete. Posts the 'events_when_complete'
        events and optionally restarts this logic block or disables it,
        depending on this block's configuration settings.
        """
        self.log.debug("Complete")
        if self.config['events_when_complete']:
            for event in self.config['events_when_complete']:
                self.machine.events.post(event)

        # restart wins over disable when both are configured.
        if self.config['restart_on_complete']:
            self.reset()
            self.enable()

        elif self.config['disable_on_complete']:
            self.disable()
class Counter(LogicBlock):
    """A type of LogicBlock that tracks multiple hits of a single event.

    This counter can be configured to track hits towards a specific end-goal
    (like number of tilt hits to tilt), or it can be an open-ended count (like
    total number of ramp shots).

    It can also be configured to count up or to count down, and can have a
    configurable counting interval.
    """
    # todo settle time

    def __init__(self, machine, name, player, config):
        self.log = logging.getLogger('Counter.' + name)
        self.log.debug("Creating Counter LogicBlock")

        super(Counter, self).__init__(machine, name, player, config)

        self.delay = DelayManager()

        # While True, hit() is a no-op (used for the multiple_hit_window).
        self.ignore_hits = False
        # Signed per-hit increment; recomputed from config below.
        self.hit_value = -1

        config_spec = '''
                        count_events: list|None
                        count_complete_value: int|None
                        multiple_hit_window: ms|0
                        count_interval: int|1
                        direction: string|up
                        starting_count: int|0
                      '''

        self.config = Config.process_config(config_spec=config_spec,
                                            source=self.config)

        # Default the per-hit event and the tracking player variable names.
        if 'event_when_hit' not in self.config:
            self.config['event_when_hit'] = ('counter_' + self.name +
                                             '_hit')

        if 'player_variable' not in self.config:
            self.config['player_variable'] = self.name + '_count'

        # Force the sign of the increment to match the configured direction.
        self.hit_value = self.config['count_interval']

        if self.config['direction'] == 'down' and self.hit_value > 0:
            self.hit_value *= -1
        elif self.config['direction'] == 'up' and self.hit_value < 0:
            self.hit_value *= -1

        if not self.config['persist_state']:
            self.player[self.config['player_variable']] = (
                self.config['starting_count'])

    def enable(self, **kwargs):
        """Enables this counter. Automatically called when one of the
        'enable_event's is posted. Can also manually be called.
        """
        super(Counter, self).enable()
        self.machine.events.remove_handler(self.hit)  # prevents multiples

        for event in self.config['count_events']:
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.hit))

    def reset(self, **kwargs):
        """Resets the hit progress towards completion"""
        super(Counter, self).reset(**kwargs)
        self.player[self.config['player_variable']] = (
            self.config['starting_count'])

    def hit(self, **kwargs):
        """Increases the hit progress towards completion. Automatically called
        when one of the `count_events`s is posted. Can also manually be
        called.
        """
        if not self.ignore_hits:
            self.player[self.config['player_variable']] += self.hit_value
            self.log.debug("Processing Count change. Total: %s",
                           self.player[self.config['player_variable']])

            # Completion test depends on the counting direction.
            if self.config['count_complete_value'] is not None:
                if (self.config['direction'] == 'up' and
                        self.player[self.config['player_variable']] >=
                        self.config['count_complete_value']):
                    self.complete()

                elif (self.config['direction'] == 'down' and
                        self.player[self.config['player_variable']] <=
                        self.config['count_complete_value']):
                    self.complete()

            if self.config['event_when_hit']:
                self.machine.events.post(self.config['event_when_hit'],
                                         count=self.player[self.config['player_variable']])

            # Debounce: swallow further hits until the window expires.
            if self.config['multiple_hit_window']:
                self.log.debug("Beginning Ignore Hits")
                self.ignore_hits = True
                self.delay.add(name='ignore_hits_within_window',
                               ms=self.config['multiple_hit_window'],
                               callback=self.stop_ignoring_hits)

    def stop_ignoring_hits(self, **kwargs):
        """Causes the Counter to stop ignoring subsequent hits that occur
        within the 'multiple_hit_window'. Automatically called when the window
        time expires. Can safely be manually called.
        """
        self.log.debug("Ending Ignore hits")
        self.ignore_hits = False
class Accrual(LogicBlock):
    """A type of LogicBlock which tracks many different events (steps) towards
    a goal, with the steps being able to happen in any order.
    """

    def __init__(self, machine, name, player, config):
        self.log = logging.getLogger('Accrual.' + name)
        self.log.debug("Creating Accrual LogicBlock")

        super(Accrual, self).__init__(machine, name, player, config)

        config_spec = '''
                        events: list_of_lists
                      '''

        self.config = Config.process_config(config_spec=config_spec,
                                            source=self.config)

        if 'player_variable' not in config:
            self.config['player_variable'] = self.name + '_status'

        # (Re)build the per-step completion list unless a previous one exists
        # and state persistence was requested.
        existing_status = self.player[self.config['player_variable']]
        if not existing_status or not self.config['persist_state']:
            self.player[self.config['player_variable']] = (
                [False] * len(self.config['events']))

    def enable(self, **kwargs):
        """Enables this accrual. Automatically called when one of the
        'enable_events' is posted. Can also manually be called.
        """
        super(Accrual, self).enable()
        self.machine.events.remove_handler(self.hit)  # prevents multiples

        # Every event of every step is routed to hit(), tagged with its
        # zero-based step index.
        for step_index, step_events in enumerate(self.config['events']):
            for event in step_events:
                self.handler_keys.add(
                    self.machine.events.add_handler(event, self.hit,
                                                    step=step_index))

    def reset(self, **kwargs):
        """Resets the hit progress towards completion"""
        super(Accrual, self).reset(**kwargs)

        self.player[self.config['player_variable']] = (
            [False] * len(self.config['events']))
        self.log.debug("Status: %s",
                       self.player[self.config['player_variable']])

    def hit(self, step, **kwargs):
        """Increases the hit progress towards completion. Automatically called
        when one of the `count_events` is posted. Can also manually be
        called.

        Args:
            step: Integer of the step number (0 indexed) that was just hit.
        """
        self.log.debug("Processing hit for step: %s", step)
        status = self.player[self.config['player_variable']]
        status[step] = True
        self.log.debug("Status: %s", status)

        # Complete once every step has been marked True.
        if status.count(True) == len(status):
            self.complete()
class Sequence(LogicBlock):
    """A type of LogicBlock which tracks many different events (steps) towards
    a goal, with the steps having to happen in order.
    """

    def __init__(self, machine, name, player, config):
        self.log = logging.getLogger('Sequence.' + name)
        self.log.debug("Creating Sequence LogicBlock")

        super(Sequence, self).__init__(machine, name, player, config)

        config_spec = '''
                        events: list_of_lists
                      '''

        self.config = Config.process_config(config_spec=config_spec,
                                            source=self.config)

        # The player variable holds the current (zero-based) step number.
        if 'player_variable' not in config:
            self.config['player_variable'] = self.name + '_step'

        if not self.config['persist_state']:
            self.player[self.config['player_variable']] = 0

    def enable(self, step=0, **kwargs):
        """Enables this Sequence. Automatically called when one of the
        'enable_events' is posted. Can also manually be called.

        Args:
            step: Step number this logic block will be at when it's enabled.
                Default is 0.

        Note the step numbers are zero-based.
        """
        super(Sequence, self).enable()

        if step:
            self.player[self.config['player_variable']] = step

        # Enabling past the last step means there is nothing left to do.
        if self.player[self.config['player_variable']] >= (
                len(self.config['events'])):
            # hmm.. we're enabling, but we're done. So now what?
            self.log.warning("Received request to enable at step %s, but this "
                             " Sequence only has %s step(s). Marking complete",
                             self.player[self.config['player_variable']],
                             len(self.config['events']))
            return

        # add the handlers for the current step only; later steps are wired
        # up one at a time as each step completes (see hit()).
        for event in (self.config['events']
                      [self.player[self.config['player_variable']]]):
            self.handler_keys.add(
                self.machine.events.add_handler(event, self.hit))

    def hit(self, **kwargs):
        """Increases the hit progress towards completion. Automatically called
        when one of the `count_events` is posted. Can also manually be
        called.
        """
        self.log.debug("Processing Hit")
        # remove the event handlers for this step
        self.machine.events.remove_handler(self.hit)

        self.player[self.config['player_variable']] += 1

        if self.player[self.config['player_variable']] >= (
                len(self.config['events'])):
            self.complete()
        else:
            # add the handlers for the new current step
            for event in (self.config['events']
                          [self.player[self.config['player_variable']]]):
                self.handler_keys.add(
                    self.machine.events.add_handler(event, self.hit))

    def reset(self, **kwargs):
        """Resets the sequence back to the first step."""
        super(Sequence, self).reset(**kwargs)
        self.player[self.config['player_variable']] = 0
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/system/logic_blocks.py
|
Python
|
mit
| 20,301
|
[
"Brian"
] |
4ac600fde5ff4cb6fa2e8bbd09a8eae5ee3abe0d8d1ee4a275f495e1b3d4aec8
|
#!/usr/bin/env python
#
# Copyright 2008-2010 by Brian Dominy <brian@oddchange.com>
#
# PYLDecider is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PYLDecider is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PYLDecider; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# This script simulates games of "Press Your Luck" in order to determine
# strategy. The basic question it tries to solve is: given some situation
# in the middle of a game (in round 2) in which a player decision is
# required, what is the best decision for that player to make to maximize
# his chances of winning the game?
#
# The program works by following all possible paths from the initial state,
# computing best plays in each substate. There are actually two types
# of game states: decision states and spinning states. When deciding whether
# to play or pass, a decision is required. In these states, if we know the
# win probability for each of the choices, we assume that the player up
# will always make the best decision. The win probability is "bubbled up"
# from the optimal substate back to the previous state.
#
# The game also involves randomness on the game board which must be
# accounted for. When a player is spinning, we compute all possible
# outcomes from that spin, and their winning probabilities for the player.
# Then the overall win probability is a weighted sum of the substate
# probabilities.
#
# The simulation enforces all game rules correctly as far as when spins
# are allowed to be passed, and the order of play. Ties are not considered
# for the sake of simplicity.
#
####################################################################
# CONSTANTS
####################################################################

# The different types of states in the game tree:
#   DECIDING_PLAY : requires a decision of play/pass
#   SPINNING : a commitment to spin has been made, and the spin
#              count already reduced to indicate "board in motion"
#   FINISHED : end-of-game condition has been reached
DECIDING_PLAY, SPINNING, FINISHED = range(3)

# The possible options for DECIDING_PLAY:
SPIN, PASS = range(2)
####################################################################
# SIMULATION FUNCTIONS
####################################################################
def nspaces(n):
    """Return a string of n spaces (used to indent trace output).

    Replaces the original character-by-character accumulation loop with
    string repetition.
    """
    return " " * n
def debug(*args):
    """Print all args on one line, space-separated, for debug tracing.

    Rewritten with print-as-function syntax so it behaves identically under
    Python 2 and Python 3; the output matches the original Python 2
    'print arg,' loop (items separated by single spaces, one trailing
    newline).  The original 'if (1):' always-true guard has been dropped:
    tracing is unconditional either way.
    """
    print(" ".join(str(arg) for arg in args))
def choice_string(choice):
    """Map a DECIDING_PLAY choice constant to its display name.

    Unknown choices (including None) map to the empty string.
    """
    names = {SPIN: "spin", PASS: "pass"}
    return names.get(choice, "")
####################################################################
# PRESS YOUR LUCK GAME IMPLEMENTATION
####################################################################
class Player:
    """State for one Press Your Luck contestant."""

    MAX_WHAMMIES = 4

    def __init__(self, id):
        """Initialize a new player object."""
        self.id = self.pos = id
        self.score = 0
        self.earned = 0
        self.passed = 0
        self.whammies = 0

    def copy(self):
        """Return a modifiable copy of a player object."""
        clone = Player(self.id)
        clone.score = self.score
        clone.earned = self.earned
        clone.passed = self.passed
        clone.whammies = self.whammies
        return clone

    def __str__(self):
        """Return a printable version of a player object.

        For the sake of brevity, all zero values except for the score
        are omitted.
        """
        parts = ["($" + str(self.score)]
        if self.earned:
            parts.append(" E" + str(self.earned))
        if self.passed:
            parts.append(" P" + str(self.passed))
        if self.whammies:
            parts.append(" W" + str(self.whammies))
        parts.append(")")
        return "".join(parts)

    def total_spins(self):
        """Return the total number of spins a player has."""
        return self.earned + self.passed

    def in_game(self):
        """Return true if a player is still in the game."""
        return self.whammies < Player.MAX_WHAMMIES

    def take_spin(self):
        """Update when a player commits to taking a spin.

        Passed spins are spent before earned spins; with neither available,
        taking a spin is an error.
        """
        if self.passed > 0:
            self.passed -= 1
            return
        if self.earned > 0:
            self.earned -= 1
            return
        raise RuntimeError('No spin to take')

    def add_earned(self, num):
        self.earned += num

    def add_passed(self, num):
        self.passed += num

    def award_whammy(self):
        """Apply a whammy to a player.

        The score is zeroed, the whammy count is increased, passed spins are
        converted to earned, and once the maximum number of whammies has been
        reached all spins are taken away.
        """
        self.score = 0
        self.whammies += 1
        self.earned += self.passed
        self.passed = 0
        if not self.in_game():
            self.earned = 0
class Result:
    """The base class for a board space.

    A Result can be thought of as an operator that is applied to a Player;
    every board space implements these three queries.  The defaults here
    describe a space that awards nothing.
    """

    def is_whammy(self):
        return False

    def get_score(self):
        return 0

    def get_spins(self):
        return 0
class CashResult(Result):
    """A board space worth a fixed dollar amount and no extra spin."""

    def __init__(self, value):
        # value: dollar amount awarded when this space is hit.
        self.value = value

    def get_name(self):
        return "$" + str(self.value)

    def get_score(self):
        return self.value
class WhammyResult(Result):
    """The whammy space: awards nothing and zeroes the spinner's score."""

    def get_name(self):
        return "whammy"

    def is_whammy(self):
        return True
class CashSpinResult(CashResult):
    """A cash space that also grants one or more extra spins."""

    def __init__(self, value, spins=1):
        CashResult.__init__(self, value)
        # spins: number of extra spins granted along with the cash.
        self.spins = spins

    def get_name(self):
        return "$" + str(self.value) + "+spin"

    def get_spins(self):
        return self.spins
class PrizeResult(CashResult):
    """A prize space, modeled as cash worth the prize's value.

    Generalized: the value used to be hard-coded to 2300 in get_score();
    it is now a constructor parameter defaulting to 2300, stored via the
    CashResult base (which the original __init__ skipped entirely), so
    existing PrizeResult() call sites behave the same.
    """

    def __init__(self, value=2300):
        CashResult.__init__(self, value)

    def get_name(self):
        return "PRIZE"
# A minimal 3-space reference board as (probability, Result) pairs.
# NOTE(review): nothing visible in this file feeds SimpleBoard through
# Board.add(); it appears to document the probabilities that were hard-coded
# in Board.complete() -- confirm before removing.
SimpleBoard = [ (0.16, WhammyResult ()),
                (0.70, CashResult (2000)),
                (0.14, CashSpinResult (1000, 1)) ]
class Board:
    """The description for an entire gameboard.

    A Board is described in terms of the three fundamentally different types
    of spaces: whammies, cash only, and cash-plus-spin.  Identical board
    spaces are merged during construction.  After complete() is called, the
    board stores the single-spin probability of each distinct space.
    """

    def __init__(self):
        self.count = 0            # total number of spaces added
        # After the board is fully constructed, the spaces are separated into
        # the 3 types.  For whammies, only the overall probability is kept.
        # For the other types, these are lists of (probability, value) tuples.
        self.whammy_prob = 0
        self.fixed = []           # cash-only spaces
        self.bonus = []           # cash-plus-spin spaces
        self.win_prob_cache = {}  # memo table for win_prob()

    def dump(self):
        """Dump a Board object for debugging purposes."""
        # print() with a single pre-joined string works the same under
        # Python 2 and Python 3.
        print("Count =  " + str(self.count))
        print("Whammy prob =  " + str(self.whammy_prob))
        print("Fixed: " + str(self.fixed))
        print("Bonus: " + str(self.bonus))

    def add(self, *results):
        """Add new space(s) to the game board.

        Spaces of the same type and value are merged into one entry whose
        weight is the number of occurrences.  (The original version shadowed
        the builtin 'list' and removed entries from the list it was
        iterating; both issues are fixed here.)
        """
        for result in results:
            if result.is_whammy():
                self.whammy_prob += 1.0
            else:
                # Pick the group this space belongs to.
                if result.get_spins() > 0:
                    group = self.bonus
                else:
                    group = self.fixed

                # Merge with an existing entry of the same value, if any;
                # the merge invariant guarantees at most one match.
                value = result.get_score()
                weight = 1.0
                for entry in group:
                    if entry[1] == value:
                        weight += entry[0]
                        group.remove(entry)
                        break
                group.append((weight, value))
            self.count += 1

    def complete(self):
        """Say that all game board spaces have been added.

        Normalizes the per-space weights into probabilities and precomputes
        the attributes needed repeatedly by win_prob():

          total_bonus_prob -- probability of landing on any cash-plus-spin
              space (previously hard-coded to 0.14 with a TODO).
          max_fixed_value -- largest cash-only value on the board
              (previously hard-coded to 2500 with a TODO).
        """
        total = float(self.count)
        self.whammy_prob = float(self.whammy_prob) / total

        self.fixed = [(weight / total, value) for weight, value in self.fixed]
        self.bonus = [(weight / total, value) for weight, value in self.bonus]

        # Derived from the actual board contents (resolves the old TODOs).
        self.total_bonus_prob = sum(prob for prob, _ in self.bonus)
        self.max_fixed_value = max([value for _, value in self.fixed] or [0])

    def fixed_prob(self, n):
        """Return the percentage of board spaces that are a fixed value
        (with no additional spin) and have value greater than or equal to n.

        This probability is one component of the overall winning probability
        in an endgame.
        """
        prob = 0.0
        for space_prob, value in self.fixed:
            if n <= value:
                prob += space_prob
        return prob

    def win_prob(self, lead):
        """Return the probability of winning with one spin to go.

        'lead' is negative if the spinning player is behind, else positive.
        Only the score difference matters, not the absolute scores.

        Two components contribute: fixed-value spaces, and extra-spin spaces
        whose contributions form a converging geometric series over repeated
        spins, summed by the recursive call.  The recursion terminates once
        the lead exceeds the largest fixed value on the board, where the
        closed form for an infinite geometric series applies.  Results for
        recursive leads are memoized in win_prob_cache (now consulted before
        any recomputation).
        """
        if lead in self.win_prob_cache:
            prob = self.win_prob_cache[lead]
        elif lead > self.max_fixed_value:
            # No fixed space can erase this lead; extra-spin spaces merely
            # rescale the fixed probability (infinite geometric series).
            prob = self.fixed_prob(-lead) / (1 - self.total_bonus_prob)
        else:
            prob = self.fixed_prob(-lead)
            for bonus_prob, bonus_value in self.bonus:
                prob += bonus_prob * self.win_prob(lead + bonus_value)
            self.win_prob_cache[lead] = prob
        print("win_prob: lead=" + str(lead) + " is " + str(prob))
        return prob

    def test_win_prob(self):
        """Print win_prob over a range of leads (manual sanity check)."""
        for lead in range(-6000, 6001, 1000):
            prob = self.win_prob(lead)
            print("Lead = %d   Win prob = %f" % (lead, prob))
####################################################################
# STRATEGY SIMULATION CLASSES
####################################################################

# Placeholder taxonomy for game-tree situations and results.  None of these
# carry any behavior yet.
# NOTE(review): nothing visible in this file instantiates them; they look
# like scaffolding for a planned refactor of the Game state machine.

class Situation:
    pass

class FinalSituation(Situation):
    pass

class DecisionSituation(Situation):
    pass

class PrizeTargetSituation(Situation):
    pass

class PassTargetSituation(Situation):
    pass

class SpinningSituation(Situation):
    pass

class WinVector:
    pass
class Game:
    """One node in the Press Your Luck game tree.

    A Game object is both the live game state (players, whose turn it is,
    what action is pending) and a search node whose win probabilities are
    computed recursively.
    """

    id = 0           # next state ID to assign (used for trace output)
    max_depth = 12   # search depth limit
    NUM_PLAYERS = 3
    db = {}          # shared cache: hash_key() -> win vector

    def __init__ (self, board):
        # player[0] is always the player up; see swap_players()/unswap().
        self.player = [Player(0), Player(1), Player(2)]
        self.swap_vector = [ 0, 1, 2 ]
        self.id = Game.id
        self.best_choice = None
        self.board = board
        Game.id += 1

    def initialize (self):
        """Complete initialization of a game object.

        NOTE(review): relies on self.action having been set by the caller
        beforehand -- __init__ does not set it.
        """
        if (self.action != SPINNING):
            self.update_turn ()
    def __str__ (self):
        # One-line human-readable state summary used by the trace output.
        # NOTE(review): if self.action is none of the three known constants,
        # 'string' is never bound and this raises -- confirm action's domain.
        if (self.action == SPINNING):
            string = "Spinning>"
        elif (self.action == DECIDING_PLAY):
            string = "Deciding>"
        elif (self.action == FINISHED):
            string = "Finished>"
        string += " {"
        for player in self.player:
            string += str(player) + " "
        string += "} ID" + str(self.id)
        if self.best_choice == SPIN:
            string += " (should spin)"
        elif self.best_choice == PASS:
            string += " (should pass)"
        if self.action == SPINNING and self.total_spins () == 0:
            string += " (final spin)"
        return string

    def id_string (self):
        """Return the ID string for this game state.
        """
        return "ID" + str(self.id)

    def hash_key (self):
        """Return a hash key for this game state.

        The hash key is the minimal string that identifies whether
        two game state objects are equal.
        """
        # Encodes the pending action plus each player's (score, earned,
        # passed, whammies) in the current (normalized) player order.
        key = str(self.action) + ':'
        for player in self.player:
            key += str(player.score) + '-'
            key += str(player.earned) + '-' + str(player.passed) + '-'
            key += str(player.whammies) + '-'
        return key
    def copy (self):
        """Return a modifiable copy of this game state.

        NOTE(review): the copy gets a fresh swap_vector and best_choice and a
        new ID; only the players, action, and board reference carry over --
        confirm that is intended for all call sites.
        """
        newgame = Game (self.board)
        newgame.player = [self.player[0].copy (),
                          self.player[1].copy (),
                          self.player[2].copy () ]
        newgame.action = self.action
        return newgame

    def player_up (self):
        """Return a reference to the active player in a game.
        """
        return self.player[0]

    def lower_opponent (self):
        """Return a reference to the active player's trailing opponent.
        """
        return self.player[1]

    def higher_opponent (self):
        """Return a reference to the active player's leading opponent.
        """
        return self.player[2]

    def total_spins (self):
        """Return the total number of spins for all players.
        """
        return (self.player[0].total_spins () +
                self.player[1].total_spins () +
                self.player[2].total_spins ())

    def is_terminal (self):
        """Return true if this is a terminal state; i.e. the game is over.
        """
        # The game ends only when no spins remain and no spin is in flight.
        return self.action != SPINNING and self.total_spins () == 0
    def unswap (self, v):
        """Return a win vector, with its entries permuted based on
        previous player swaps.

        Maps probabilities computed in the normalized player order back to
        the original seat order.
        """
        w = [0, 0, 0]
        for i in range(Game.NUM_PLAYERS):
            w[i] = v[self.swap_vector[i]]
        return w

    def swap_players (self, a, b):
        """Swap the positions of two players.

        The player list is always kept ordered based on the game situation.
        Specifically, the order is always [ current, lower opponent,
        higher opponent ].  When the order needs to be changed, this
        function moves the players into the proper slots, and also makes
        note of it in swap_vector, so that the original order can be
        restored later (see unswap()).
        """
        self.swap_vector[a], self.swap_vector[b] = self.swap_vector[b], self.swap_vector[a]
        self.player[a], self.player[b] = self.player[b], self.player[a]
    def update_turn (self):
        """Update after a change that might have changed the current player.

        This function is invoked in two cases: after deciding to pass spins,
        and after playing a spin.  It is not called after deciding to play,
        because the current player does not change in that case.

        The next action required is also updated; if a decision is required,
        then it is set to DECIDING_PLAY.  When there are passed spins to be
        played by the next player, that state is skipped entirely, and is
        set to SPINNING.  In the latter case, the spin count is also
        decremented here (inside play_spin()) for the upcoming spin.

        This function also updates the order of the player objects, based
        on the rules of the game (initial order of play and rules of
        passing).
        """
        if (self.is_terminal ()):
            self.action = FINISHED
        elif (self.player_up ().passed > 0):
            # Passed spins must be played; no decision allowed.
            self.play_spin ()
        elif (self.player_up ().earned > 0):
            self.action = DECIDING_PLAY
        else:
            # Otherwise, determine who the next player up should be.
            # If player[1] has spins, then use him; else player[2].
            if (self.player[1].total_spins () > 0):
                self.swap_players (0, 1)
            else:
                self.swap_players (0, 2)

            # Make sure that player[2] > player[1] always.
            if (self.player[1].score > self.player[2].score):
                self.swap_players (1, 2)

            # Set the action based on the new player up.
            if (self.player_up ().passed > 0):
                self.play_spin ()
            else:
                self.action = DECIDING_PLAY
    ###### Operations on a SPINNING game state #########

    def apply (self, result):
        """Apply a game board result to the current game state.

        A whammy zeroes the player up (via award_whammy); any other space
        adds its cash value and any extra spins.  The turn then advances.
        """
        if result.is_whammy ():
            self.player_up().award_whammy ()
        else:
            self.player_up().score += result.get_score ()
            self.player_up().earned += result.get_spins ()
        self.update_turn ()
###### Operations on a DECIDING_PLAY game state #########
def play_spin (self):
"""Modify a decision state by choosing to play.
"""
if (self.player_up().total_spins == 0):
raise RuntimeError ('Cannot play spin')
self.action = SPINNING
self.player_up ().take_spin ()
    def pass_spins (self):
        """Modify a decision state by choosing to pass.

        All of the player-up's earned spins are handed to the higher-scoring
        opponent as passed spins, then the turn advances.

        Raises:
            RuntimeError: if the player up holds passed spins (the rules
            require those to be played, not re-passed).
        """
        if (self.player_up().passed > 0):
            raise RuntimeError ('Cannot pass spins')
        self.higher_opponent().add_passed (self.player_up ().earned)
        self.player_up ().earned = 0
        self.update_turn ()
    ############# Simulation-only functions ####################

    def log (self, depth, *args):
        # Emit one indented trace line tagged with this state's ID.
        debug (nspaces(depth), self.id_string (), ":", *args)

    def get_pessimistic_win_vector (self):
        # Fallback estimate when the search cannot determine a better value:
        # if the lower opponent has no spins left, treat the game as a coin
        # flip between the player up and the higher opponent; otherwise
        # assume all three players are equally likely to win.
        if (self.lower_opponent ().total_spins () == 0):
            return [0.5, 0.0, 0.5]
        else:
            return [1.0/3, 1.0/3, 1.0/3]
def get_win_vector (self, depth = 0):
"""Return the win vector for a game state, which is the probability
of winning for each of the players.
This is the top-level interface to determine optimal strategy.
If the state has already been calculated and is in the cache, then
it is returned immediately. Otherwise it must be calculated and
saved.
"""
# TODO - keep statistics on the helpfulness of caching
key = self.hash_key ()
try:
v = Game.db[key]
#debug (nspaces(depth), self.id_string (), self, "(cached)", v)
self.log (depth, self, "(cached)", v)
except KeyError:
v = self.compute_win_vector (depth)
return v
def save_win_vector (self, v):
    """Save the state's win vector in cache after calculating it.

    The cache is capped at 25000 entries to limit memory usage; once
    full, new results are simply not cached.
    """
    if len (Game.db) >= 25000:
        return
    Game.db[self.hash_key ()] = v
def obvious_choice (self):
    """Return a subset of the choices that are feasible for exploring.

    In certain cases, it can be proven that a particular choice is
    clearly good/bad.  Instead of following all paths, the search can
    then be restricted to the single 'obvious' choice.  Returns None
    when no shortcut applies.
    """
    ### Obvious rule #1:
    # If you are in third place, and second place has no spins, you must
    # PLAY.  Passing would send the spins to the leader and completely
    # take you out of the game.
    ###
    opponent = self.lower_opponent ()
    in_third = self.player_up ().score < opponent.score
    if in_third and opponent.total_spins () == 0:
        return SPIN
    return None
def compute_win_vector (self, depth = 0):
    """Compute the win vector for a state by recursively checking each of
    its successor states.  The win vector is a 3-tuple where each element
    indicates the probability of that player winning the game based on
    absolute correct play by all players.  The first element is always for the
    player up.
    """
    #debug (nspaces(depth), self.id_string (), ":", self, "depth", depth)
    self.log (depth, self, "depth", depth)
    # Initialize the result to all zeroes, meaning that the result is completely
    # unknown.
    v = [0.0, 0.0, 0.0]
    # If this is a final state, then the win vector is absolutely
    # determinable.  Ties are not supported, so exactly one player
    # is marked as the winner.
    if self.action == FINISHED:
        # NOTE(review): 'max' shadows the builtin in this branch; harmless
        # here since the builtin is not used below.
        max = 0
        winner = None
        for p in range (Game.NUM_PLAYERS):
            # A whammied-out player can never win, whatever the score.
            if self.player[p].whammies < Player.MAX_WHAMMIES and self.player[p].score > max:
                max = self.player[p].score
                winner = p
        if winner != None:
            v[winner] = 1.0
    elif self.action == DECIDING_PLAY:
        # Decide which choices to consider: normally, both PASS and SPIN, but
        # possibly only one if it is obvious. ??? If only one choice, then do we even
        # need to calculate the win vector?
        choice = self.obvious_choice ()
        if choice is not None:
            choices = [ choice ]
        else:
            # TODO - handle pass_spins() when can choose target recipient.
            choices = [ PASS, SPIN ]
        # For each possible choice, generate a new state that makes the choice.
        for choice in choices:
            next_state = self.copy ()
            if choice == SPIN:
                next_state.play_spin ()
            else:
                next_state.pass_spins ()
            debug (nspaces(depth), self.id_string (), "If choice ", choice_string (choice))
            # unswap() maps the child's win vector back into this state's
            # player ordering before it is compared.
            win_vector = next_state.unswap (next_state.get_win_vector (depth+1))
            debug (nspaces(depth), self.id_string (), "Sub-vector ", win_vector)
            # The key to computing a DECIDING_PLAY state: choose the
            # substate that offers the best results for the player up.
            if (win_vector[0] > v[0]):
                v = win_vector
                self.best_choice = choice
    elif self.action == SPINNING:
        if self.total_spins () == 0:
            # This is a final spin. Replace infinite recursion by an
            # exact calculation using a converging geometric series.
            lead = self.player_up ().score - self.higher_opponent ().score
            win_prob = self.board.win_prob (lead)
            v = [ win_prob, 0, 1.0 - win_prob ]
            debug (nspaces(depth), self.id_string (), "Final spin", v)
        elif depth > Game.max_depth:
            # This is not a final spin, but we have been recursing too
            # deeply. At such depths, the result for this sub-state will
            # have little effect on the result of the original state
            # (at depth 0). Therefore, we can provide a rough, pessimistic
            # result here -- that does not involve recursion of its
            # sub-states -- and that will be good enough for the caller.
            # TODO: However, states at depth-1, depth-2, etc. which are
            # close to the maximum depth will not be as accurate either.
            # These state values are getting cached and reused. So the
            # cache entry needs to reflect that.
            v = self.get_pessimistic_win_vector ()
            #debug (nspaces(depth), self.id_string (), "(pessimistic)")
            self.log (depth, "(pessimistic)")
        else:
            # The ordinary method to use on a spinning state is to generate
            # all possible resulting states (based on all possible outcomes
            # of a random spin), compute the win probability for each of them
            # recursively, then combine all of those sub-results together
            # using a weighted sum.
            i = 0
            # Each 'poss' carries its probability in poss[0] and the board
            # result object in poss[1].
            for poss in SimpleBoard: # TODO - not using real game board yet
                state = self.copy ()
                state.apply (poss[1])
                debug (nspaces(depth), self.id_string (), "After ", poss[1].get_name ())
                state_win_vector = state.unswap (state.get_win_vector (depth+1))
                debug (nspaces(depth), self.id_string (), "Sub-vector ", state_win_vector)
                for p in range(Game.NUM_PLAYERS):
                    v[p] += state_win_vector[p] * poss[0]
                i = i + 1
            # Only in this case where we did a full exact calculation, do we
            # cache that result for reuse later.
            self.save_win_vector (v)
    debug (nspaces (depth), self.id_string (), "Result:", v, choice_string (self.best_choice))
    # The sum of the win probabilities per player should not exceed 1.00.
    # Allow a little slack since floating-point is imprecise sometimes.
    if (v[0] + v[1] + v[2]) > 1.05:
        raise RuntimeError ('Invalid win vector')
    return v
# def analyze (self, depth = 0):
# if (self.action == FINISHED):
# return self.analyze_final (depth)
# elif (self.action == DECISION_PLAY):
# return self.analyze_play_decision (depth)
# elif (self.action == SPINNING):
# return self.analyze_spin (depth)
# else:
# raise RuntimeError ('Invalid action')
#
#
# def analyze_final (self, depth = 0):
# pass
#
#
# def analyze_play_decision (self, depth = 0):
# pass
#
#
# def analyze_spin (self, depth = 0):
# pass
class Endgame(Game):
    """A subclass of Game that can more easily be initialized to a final state.

    An Endgame object can be created in place of a Game object when there are
    only two players remaining, with no passed spins and without consideration
    of whammying out.

    score     -- score of the player up (player 0)
    opp_score -- score of the sole remaining opponent (player 2)
    spins     -- earned spins held by the player up (default 1)
    """
    def __init__ (self, score, opp_score, spins = 1):
        Game.__init__ (self)
        # Start at a decision point: the player up must choose play/pass.
        self.action = DECIDING_PLAY
        self.player[0].score = score
        self.player[0].earned = spins
        # Player 1 is left at its defaults; only player 2 opposes.
        self.player[2].score = opp_score
        self.initialize ()
class EndgameTester:
    """A test class that runs a large number of Endgame() scenarios.

    Sweeps a grid of (score, opponent score, spin count) combinations,
    solves each endgame, and prints the resulting state/decision.
    """
    def __init__ (self):
        for score in range (0, 10000, 1000):
            for opp_score in range (0, 10000, 1000):
                for spins in range (1, 5):
                    game = Endgame(score, opp_score, spins)
                    game.get_win_vector ()
                    print game
class ClassicBoard(Board):
    """A concrete Board populated with 18 spaces.

    Each add() call defines one board space with its three possible
    values; complete() finalizes the layout and dump() prints it for
    inspection at construction time.
    """
    def __init__ (self):
        Board.__init__ (self)
        self.add (CashResult (1400), CashResult (1750), CashResult (2250))
        self.add (PrizeResult (), CashResult (500), CashResult (1250))
        self.add (WhammyResult (), CashResult (500), CashResult (2000))
        self.add (CashSpinResult (3000), CashSpinResult (4000), CashSpinResult (5000))
        self.add (WhammyResult (), PrizeResult (), CashResult (750))
        self.add (CashSpinResult (700), CashSpinResult (4000), CashResult (1500))
        self.add (WhammyResult (), PrizeResult (), CashResult (750))
        self.add (CashSpinResult (500), CashSpinResult (750), CashSpinResult (1000))
        self.add (WhammyResult (), CashResult (800), CashResult (1500))
        self.add (CashResult (2000), CashResult (2500), PrizeResult ())
        self.add (CashResult (1500), CashResult (2500), WhammyResult ())
        self.add (CashResult (500), WhammyResult (), CashSpinResult (4000))
        self.add (CashResult (2000), CashResult (2500), PrizeResult ())
        self.add (CashResult (1500), WhammyResult (), CashSpinResult (1000))
        self.add (CashSpinResult (1000), CashSpinResult (1500), PrizeResult ())
        self.add (CashSpinResult (750), CashResult (600), CashResult (2500))
        self.add (CashSpinResult (700), WhammyResult (), CashResult (600))
        self.add (CashSpinResult (750), CashSpinResult (1000), WhammyResult ())
        self.complete ()
        self.dump ()
# Entry point into the simulation.
# Right now, it is hardcoded as to what to do... change the code below
# to do what you want.
if __name__ == '__main__':
    # g = Endgame (2000, 5000)
    # g.player_up ().whammies = 3
    #EndgameTester ()
    # First, you need to create a Board object. The subclass 'ClassicBoard'
    # will give you an instance of one of the actual game boards used on the
    # show for most of its run.
    board=ClassicBoard ()
    # For debugging, you can inspect the board object.
    #board.dump ()
    #board.test_win_prob ()
    # Then you create a Game simulation, and tie it to the gameboard just created.
    # Also you initialize the scores and spin counts of all players. The
    # game action should always begin as DECIDING_PLAY. Call g.initialize()
    # once everything has been set up.
    g = Game (board)
    g.action = DECIDING_PLAY
    # Player 0 (the player up) has 5000 points and 3 earned spins; the
    # opponents hold 2000 and 10000 points with no spins.
    g.player[0].score = 5000
    g.player[0].earned = 3
    g.player[1].score = 2000
    g.player[2].score = 10000
    g.initialize ()
    # The method 'get_win_vector' begins the simulation, and will return the
    # probability of the player up winning the game. Note there is much logging
    # during the simulation so lots more gets printed too.
    print g.get_win_vector ()
    print "------"
    # The best play is saved in the game object, by printing it we will see
    # what we are supposed to do.
    print g
|
bcd/PYLDecider
|
PylDecider.py
|
Python
|
gpl-2.0
| 26,870
|
[
"Brian"
] |
6e465ec6c444658f284221c3cc0187e094a16fcbd53b2d8122b881df02c12f47
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import csv
import json
import os
import random
import unittest
import numpy as np
import scipy.constants as const
from monty.tempfile import ScratchDir
from pymatgen.analysis.diffusion_analyzer import (
DiffusionAnalyzer,
fit_arrhenius,
get_conversion_factor,
)
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
class FuncTest(unittest.TestCase):
    """Tests for the module-level helpers of diffusion_analyzer."""

    def test_get_conversion_factor(self):
        """Pin the Li conversion factor for LiFePO4 at 600 K."""
        cif_path = os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif")
        structure = Structure.from_file(cif_path)
        factor = get_conversion_factor(structure, "Li", 600)
        # large tolerance because scipy constants changed between 0.16.1 and 0.17
        self.assertAlmostEqual(41370704.343540139, factor, delta=20)

    def test_fit_arrhenius(self):
        """Recover known Arrhenius parameters from slightly noisy data."""
        activation = 0.5
        boltzmann_ev = const.k / const.e
        prefactor = 12
        temps = np.array([300, 1000, 500])
        diffusivities = prefactor * np.exp(-activation / (boltzmann_ev * temps))
        diffusivities *= np.array([1.00601834013, 1.00803236262, 0.98609720824])
        fitted = fit_arrhenius(temps, diffusivities)
        self.assertAlmostEqual(fitted[0], activation)
        self.assertAlmostEqual(fitted[1], prefactor)
        self.assertAlmostEqual(fitted[2], 0.000895566)
        # when not enough values for error estimate
        short_fit = fit_arrhenius([1, 2], [10, 10])
        self.assertAlmostEqual(short_fit[0], 0)
        self.assertAlmostEqual(short_fit[1], 10)
        self.assertEqual(short_fit[2], None)
class DiffusionAnalyzerTest(PymatgenTest):
    """Regression tests for DiffusionAnalyzer.

    All numeric values below are pinned against previously computed
    results from small preprocessed MD runs; they are regression
    anchors, not physically meaningful reference data.
    """

    def test_init(self):
        """Round-trip a pre-serialized NVT analyzer and pin its derived
        quantities across the supported smoothing modes."""
        # Diffusion vasprun.xmls are rather large. We are only going to use a
        # very small preprocessed run for testing. Note that the results are
        # unreliable for short runs.
        with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "DiffusionAnalyzer.json")) as f:
            dd = json.load(f)
        d = DiffusionAnalyzer.from_dict(dd)
        # large tolerance because scipy constants changed between 0.16.1 and 0.17
        self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
        self.assertAlmostEqual(d.chg_conductivity, 232.8278799754324, 4)
        self.assertAlmostEqual(d.diffusivity, 1.16083658794e-06, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 3.64565578208e-06, 7)
        self.assertAlmostEqual(d.conductivity_std_dev, 0.0097244677795984488, 7)
        self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7)
        self.assertAlmostEqual(d.chg_diffusivity_std_dev, 7.20911399729e-10, 5)
        self.assertAlmostEqual(d.haven_ratio, 0.31854161048867402, 7)
        self.assertArrayAlmostEqual(d.conductivity_components, [45.7903694, 26.1651956, 150.5406140], 3)
        self.assertArrayAlmostEqual(
            d.diffusivity_components,
            [7.49601236e-07, 4.90254273e-07, 2.24649255e-06],
        )
        self.assertArrayAlmostEqual(d.conductivity_components_std_dev, [0.0063566, 0.0180854, 0.0217918])
        self.assertArrayAlmostEqual(
            d.diffusivity_components_std_dev,
            [8.9465670e-09, 2.4931224e-08, 2.2636384e-08],
        )
        self.assertArrayAlmostEqual(d.mscd[0:4], [0.69131064, 0.71794072, 0.74315283, 0.76703961])
        # Per-ion maximum displacements (50 ions in this run).
        self.assertArrayAlmostEqual(
            d.max_ion_displacements,
            [
                1.4620659693989553,
                1.2787303484445025,
                3.419618540097756,
                2.340104469126246,
                2.6080973517594233,
                1.3928579365672844,
                1.3561505956708932,
                1.6699242923686253,
                1.0352389639563648,
                1.1662520093955808,
                1.2322019205885841,
                0.8094210554832534,
                1.9917808504954169,
                1.2684148391206396,
                2.392633794162402,
                2.566313049232671,
                1.3175030435622759,
                1.4628945430952793,
                1.0984921286753002,
                1.2864482076554093,
                0.655567027815413,
                0.5986961164605746,
                0.5639091444309045,
                0.6166004192954059,
                0.5997911580422605,
                0.4374606277579815,
                1.1865683960470783,
                0.9017064371676591,
                0.6644840367853767,
                1.0346375380664645,
                0.6177630142863979,
                0.7952002051914302,
                0.7342686123054011,
                0.7858047956905577,
                0.5570732369065661,
                1.0942937746885417,
                0.6509372395308788,
                1.0876687380413455,
                0.7058162184725,
                0.8298306317598585,
                0.7813913747621343,
                0.7337655232056153,
                0.9057161616236746,
                0.5979093093186919,
                0.6830333586985015,
                0.7926500894084628,
                0.6765180009988608,
                0.8555866032968998,
                0.713087091642237,
                0.7621007695790749,
            ],
        )
        self.assertEqual(d.sq_disp_ions.shape, (50, 206))
        self.assertEqual(d.lattices.shape, (1, 3, 3))
        self.assertEqual(d.mscd.shape, (206,))
        self.assertEqual(d.mscd.shape, d.msd.shape)
        self.assertAlmostEqual(d.max_framework_displacement, 1.18656839605)
        ss = list(d.get_drift_corrected_structures(10, 1000, 20))
        self.assertEqual(len(ss), 50)
        # Spot-check one randomly chosen drift-corrected frame against the
        # raw displacements.
        n = random.randint(0, 49)
        n_orig = n * 20 + 10
        self.assertArrayAlmostEqual(
            ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :],
            d.disp[:, n_orig, :],
        )
        d = DiffusionAnalyzer.from_dict(d.as_dict())
        self.assertIsInstance(d, DiffusionAnalyzer)
        # Ensure summary dict is json serializable.
        json.dumps(d.get_summary_dict(include_msd_t=True))
        # Rebuild with smoothed="max" and pin the results again.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="max",
        )
        self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
        self.assertAlmostEqual(d.diffusivity, 1.14606446822e-06, 7)
        self.assertAlmostEqual(d.haven_ratio, 0.318541610489, 6)
        self.assertAlmostEqual(d.chg_conductivity, 232.8278799754324, 4)
        self.assertAlmostEqual(d.chg_diffusivity, 3.64565578208e-06, 7)
        # Rebuild with smoothing disabled.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed=False,
        )
        self.assertAlmostEqual(d.conductivity, 27.20479170406027, 4)
        self.assertAlmostEqual(d.diffusivity, 4.25976905436e-07, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 1.6666666666666667e-17, 3)
        # Rebuild with constant smoothing over 100 steps.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="constant",
            avg_nsteps=100,
        )
        self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
        self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
        self.assertAlmostEqual(d.chg_conductivity, 1.06440821953e-09, 4)
        # Can't average over 2000 steps because this is a 1000-step run.
        self.assertRaises(
            ValueError,
            DiffusionAnalyzer,
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="constant",
            avg_nsteps=2000,
        )
        # from_structures on the drift-corrected frames should agree with
        # the constant-smoothing analyzer above.
        d = DiffusionAnalyzer.from_structures(
            list(d.get_drift_corrected_structures()),
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed=d.smoothed,
            avg_nsteps=100,
        )
        self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
        self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
        # Exported CSV columns must match the in-memory msd/mscd arrays.
        d.export_msdt("test.csv")
        with open("test.csv") as f:
            data = []
            for row in csv.reader(f):
                if row:
                    data.append(row)
        data.pop(0)
        data = np.array(data, dtype=np.float64)
        self.assertArrayAlmostEqual(data[:, 1], d.msd)
        self.assertArrayAlmostEqual(data[:, -1], d.mscd)
        os.remove("test.csv")

    def test_init_npt(self):
        """Same pinned-value round trip as test_init, but for an NPT run
        (variable lattice: lattices.shape has one entry per frame)."""
        # Diffusion vasprun.xmls are rather large. We are only going to use a
        # very small preprocessed run for testing. Note that the results are
        # unreliable for short runs.
        with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "DiffusionAnalyzer_NPT.json"), "r") as f:
            dd = json.load(f)
        d = DiffusionAnalyzer.from_dict(dd)
        # large tolerance because scipy constants changed between 0.16.1 and 0.17
        self.assertAlmostEqual(d.conductivity, 499.1504129387108, 4)
        self.assertAlmostEqual(d.chg_conductivity, 1219.5959181678043, 4)
        self.assertAlmostEqual(d.diffusivity, 8.40265434771e-06, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 2.05305709033e-05, 6)
        self.assertAlmostEqual(d.conductivity_std_dev, 0.10368477696021029, 7)
        self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7)
        self.assertAlmostEqual(d.chg_diffusivity_std_dev, 1.20834853646e-08, 6)
        self.assertAlmostEqual(d.haven_ratio, 0.409275240679, 7)
        self.assertArrayAlmostEqual(d.conductivity_components, [455.178101, 602.252644, 440.0210014], 3)
        self.assertArrayAlmostEqual(
            d.diffusivity_components,
            [7.66242570e-06, 1.01382648e-05, 7.40727250e-06],
        )
        self.assertArrayAlmostEqual(d.conductivity_components_std_dev, [0.1196577, 0.0973347, 0.1525400])
        self.assertArrayAlmostEqual(
            d.diffusivity_components_std_dev,
            [2.0143072e-09, 1.6385239e-09, 2.5678445e-09],
        )
        # Per-ion maximum displacements (84 ions in this run).
        self.assertArrayAlmostEqual(
            d.max_ion_displacements,
            [
                1.13147881,
                0.79899554,
                1.04153733,
                0.96061850,
                0.83039864,
                0.70246715,
                0.61365911,
                0.67965179,
                1.91973907,
                1.69127386,
                1.60568746,
                1.35587641,
                1.03280378,
                0.99202692,
                2.03359655,
                1.03760269,
                1.40228350,
                1.36315080,
                1.27414979,
                1.26742035,
                0.88199589,
                0.97700804,
                1.11323184,
                1.00139511,
                2.94164403,
                0.89438909,
                1.41508334,
                1.23660358,
                0.39322939,
                0.54264064,
                1.25291806,
                0.62869809,
                0.40846708,
                1.43415505,
                0.88891241,
                0.56259128,
                0.81712740,
                0.52700441,
                0.51011733,
                0.55557882,
                0.49131002,
                0.66740277,
                0.57798671,
                0.63521025,
                0.50277142,
                0.52878021,
                0.67803443,
                0.81161269,
                0.46486345,
                0.47132761,
                0.74301293,
                0.79285519,
                0.48789600,
                0.61776836,
                0.60695847,
                0.67767756,
                0.70972268,
                1.08232442,
                0.87871177,
                0.84674206,
                0.45694693,
                0.60417985,
                0.61652272,
                0.66444583,
                0.52211986,
                0.56544134,
                0.43311443,
                0.43027547,
                1.10730439,
                0.59829728,
                0.52270635,
                0.72327608,
                1.02919775,
                0.84423208,
                0.61694764,
                0.72795752,
                0.72957755,
                0.55491631,
                0.68507454,
                0.76745343,
                0.96346584,
                0.66672645,
                1.06810107,
                0.65705843,
            ],
        )
        self.assertEqual(d.sq_disp_ions.shape, (84, 217))
        # NPT: one lattice per frame, not a single fixed lattice.
        self.assertEqual(d.lattices.shape, (1001, 3, 3))
        self.assertEqual(d.mscd.shape, (217,))
        self.assertEqual(d.mscd.shape, d.msd.shape)
        self.assertAlmostEqual(d.max_framework_displacement, 1.43415505156)
        ss = list(d.get_drift_corrected_structures(10, 1000, 20))
        self.assertEqual(len(ss), 50)
        # Spot-check one randomly chosen drift-corrected frame.
        n = random.randint(0, 49)
        n_orig = n * 20 + 10
        self.assertArrayAlmostEqual(
            ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :],
            d.disp[:, n_orig, :],
        )
        d = DiffusionAnalyzer.from_dict(d.as_dict())
        self.assertIsInstance(d, DiffusionAnalyzer)
        # Ensure summary dict is json serializable.
        json.dumps(d.get_summary_dict(include_msd_t=True))
        # Rebuild with smoothed="max" and pin the results again.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="max",
        )
        self.assertAlmostEqual(d.conductivity, 499.1504129387108, 4)
        self.assertAlmostEqual(d.diffusivity, 8.40265434771e-06, 7)
        self.assertAlmostEqual(d.haven_ratio, 0.409275240679, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 2.05305709033e-05, 7)
        # Rebuild with smoothing disabled.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed=False,
        )
        self.assertAlmostEqual(d.conductivity, 406.5964019770787, 4)
        self.assertAlmostEqual(d.diffusivity, 6.8446082e-06, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 1.03585877962e-05, 6)
        self.assertAlmostEqual(d.haven_ratio, 0.6607665413, 6)
        # Rebuild with constant smoothing over 100 steps.
        d = DiffusionAnalyzer(
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="constant",
            avg_nsteps=100,
        )
        self.assertAlmostEqual(d.conductivity, 425.77884571149525, 4)
        self.assertAlmostEqual(d.diffusivity, 7.167523809142514e-06, 7)
        self.assertAlmostEqual(d.chg_diffusivity, 9.33480892187e-06, 6)
        self.assertAlmostEqual(d.haven_ratio, 0.767827586952, 6)
        self.assertAlmostEqual(d.chg_conductivity, 554.5240271992852, 6)
        # Can't average over 2000 steps because this is a 1000-step run.
        self.assertRaises(
            ValueError,
            DiffusionAnalyzer,
            d.structure,
            d.disp,
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed="constant",
            avg_nsteps=2000,
        )
        # from_structures on the drift-corrected frames should agree with
        # the constant-smoothing analyzer above.
        d = DiffusionAnalyzer.from_structures(
            list(d.get_drift_corrected_structures()),
            d.specie,
            d.temperature,
            d.time_step,
            d.step_skip,
            smoothed=d.smoothed,
            avg_nsteps=100,
        )
        self.assertAlmostEqual(d.conductivity, 425.7788457114952, 4)
        self.assertAlmostEqual(d.diffusivity, 7.1675238091425148e-06, 7)
        self.assertAlmostEqual(d.haven_ratio, 0.767827586952, 7)
        self.assertAlmostEqual(d.chg_conductivity, 554.5240271992852, 6)
        # Exported CSV columns must match the in-memory msd/mscd arrays.
        d.export_msdt("test.csv")
        with open("test.csv") as f:
            data = []
            for row in csv.reader(f):
                if row:
                    data.append(row)
        data.pop(0)
        data = np.array(data, dtype=np.float64)
        self.assertArrayAlmostEqual(data[:, 1], d.msd)
        self.assertArrayAlmostEqual(data[:, -1], d.mscd)
        os.remove("test.csv")

    def test_from_structure_NPT(self):
        """Displacements from a tiny hand-built 3-frame NPT trajectory
        must account for the changing lattice."""
        coords1 = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
        coords2 = np.array([[0.0, 0.0, 0.0], [0.6, 0.6, 0.6]])
        coords3 = np.array([[0.0, 0.0, 0.0], [0.7, 0.7, 0.7]])
        lattice1 = Lattice.from_parameters(a=2.0, b=2.0, c=2.0, alpha=90, beta=90, gamma=90)
        lattice2 = Lattice.from_parameters(a=2.1, b=2.1, c=2.1, alpha=90, beta=90, gamma=90)
        lattice3 = Lattice.from_parameters(a=2.0, b=2.0, c=2.0, alpha=90, beta=90, gamma=90)
        s1 = Structure(coords=coords1, lattice=lattice1, species=["F", "Li"])
        s2 = Structure(coords=coords2, lattice=lattice2, species=["F", "Li"])
        s3 = Structure(coords=coords3, lattice=lattice3, species=["F", "Li"])
        structures = [s1, s2, s3]
        d = DiffusionAnalyzer.from_structures(
            structures,
            specie="Li",
            temperature=500.0,
            time_step=2.0,
            step_skip=1,
            smoothed=None,
        )
        self.assertArrayAlmostEqual(
            d.disp[1],
            np.array([[0.0, 0.0, 0.0], [0.21, 0.21, 0.21], [0.40, 0.40, 0.40]]),
        )
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
davidwaroquiers/pymatgen
|
pymatgen/analysis/tests/test_diffusion_analyzer.py
|
Python
|
mit
| 19,129
|
[
"pymatgen"
] |
5c56799df35e173a58e1a656d25369e028b24deea65b63532e654a3c9865b624
|
"""
========================================================================
Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture
========================================================================
This example plots the ellipsoids obtained from a toy dataset (mixture of three
Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a
Dirichlet distribution prior
(``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet
process prior (``weight_concentration_prior_type='dirichlet_process'``). On
each figure, we plot the results for three different values of the weight
concentration prior.
The ``BayesianGaussianMixture`` class can adapt its number of mixture
components automatically. The parameter ``weight_concentration_prior`` has a
direct link with the resulting number of components with non-zero weights.
Specifying a low value for the concentration prior will make the model put most
of the weight on a few components and set the remaining components' weights very
close to zero. High values of the concentration prior will allow a larger number
of components to be active in the mixture.
The Dirichlet process prior makes it possible to define an infinite number of
components and automatically selects the correct number of components: it
activates a component only if it is necessary.
On the contrary the classical finite mixture model with a Dirichlet
distribution prior will favor more uniformly weighted components and therefore
tends to divide natural clusters into unnecessary sub-components.
"""
# Author: Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
    """Draw one ellipse per mixture component on *ax*.

    Each ellipse is centered on the component mean, sized and oriented
    from the eigendecomposition of its covariance, and its opacity
    reflects the component weight.
    """
    n_comp = means.shape[0]
    for k in range(n_comp):
        eig_vals, eig_vecs = np.linalg.eigh(covars[k])
        first_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
        # Ellipse wants its rotation in degrees.
        angle_deg = 180 * np.arctan2(first_vec[1], first_vec[0]) / np.pi
        # Scale eigenvalues to ellipse axis lengths.
        axis_lengths = 2 * np.sqrt(2) * np.sqrt(eig_vals)
        ellipse = mpl.patches.Ellipse(means[k], axis_lengths[0],
                                      axis_lengths[1], 180 + angle_deg,
                                      edgecolor='black')
        ellipse.set_clip_box(ax.bbox)
        ellipse.set_alpha(weights[k])
        ellipse.set_facecolor('#56B4E9')
        ax.add_artist(ellipse)
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
    """Plot the fitted mixture on *ax1* (scatter + ellipses) and its
    per-component weight bar chart on *ax2*."""
    ax1.set_title(title)
    ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
    ax1.set_xlim(-2., 2.)
    ax1.set_ylim(-3., 3.)
    ax1.set_xticks(())
    ax1.set_yticks(())
    plot_ellipses(ax1, estimator.weights_, estimator.means_,
                  estimator.covariances_)

    ax2.get_xaxis().set_tick_params(direction='out')
    ax2.yaxis.grid(True, alpha=0.7)
    for idx, weight in enumerate(estimator.weights_):
        ax2.bar(idx, weight, width=0.9, color='#56B4E9', zorder=3,
                align='center', edgecolor='black')
        percent_label = "%.1f%%" % (weight * 100.)
        ax2.text(idx, weight + 0.007, percent_label,
                 horizontalalignment='center')
    ax2.set_xlim(-.6, 2 * n_components - .4)
    ax2.set_ylim(0., 1.1)
    ax2.tick_params(axis='y', which='both', left=False,
                    right=False, labelleft=False)
    ax2.tick_params(axis='x', which='both', top=False)
    if plot_title:
        ax1.set_ylabel('Estimated Mixtures')
        ax2.set_ylabel('Weight of each component')
# Parameters of the dataset
random_state, n_components, n_features = 2, 3, 2
colors = np.array(['#0072B2', '#F0E442', '#D55E00'])

# Diagonal covariances, sample counts and means of the three true Gaussians.
covars = np.array([[[.7, .0], [.0, .1]],
                   [[.5, .0], [.0, .1]],
                   [[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
                  [.0, .0],
                  [.0, .70]])

# mean_precision_prior= 0.8 to minimize the influence of the prior
# Each entry: (figure title prefix, estimator, concentration priors to sweep).
estimators = [
    ("Finite mixture with a Dirichlet distribution\nprior and "
     r"$\gamma_0=$", BayesianGaussianMixture(
        weight_concentration_prior_type="dirichlet_distribution",
        n_components=2 * n_components, reg_covar=0, init_params='random',
        max_iter=1500, mean_precision_prior=.8,
        random_state=random_state), [0.001, 1, 1000]),
    ("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
     BayesianGaussianMixture(
        weight_concentration_prior_type="dirichlet_process",
        n_components=2 * n_components, reg_covar=0, init_params='random',
        max_iter=1500, mean_precision_prior=.8,
        random_state=random_state), [1, 1000, 100000])]

# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
    rng.multivariate_normal(means[j], covars[j], samples[j])
    for j in range(n_components)])
y = np.concatenate([np.full(samples[j], j, dtype=int)
                    for j in range(n_components)])

# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
    plt.figure(figsize=(4.7 * 3, 8))
    plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
                        left=.03, right=.99)
    gs = gridspec.GridSpec(3, len(concentrations_prior))
    for k, concentration in enumerate(concentrations_prior):
        # Refit the same estimator object for each prior value.
        estimator.weight_concentration_prior = concentration
        estimator.fit(X)
        plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
                     X, y, r"%s$%.1e$" % (title, concentration),
                     plot_title=k == 0)

plt.show()
|
glemaitre/scikit-learn
|
examples/mixture/plot_concentration_prior.py
|
Python
|
bsd-3-clause
| 5,695
|
[
"Gaussian"
] |
da78b701898915e870cbca9aa15f72f373486bd19eb0ff8e6dc48e4edc132dff
|
# Copyright 2008-2009 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of django-facebookconnect.
#
# django-facebookconnect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-facebookconnect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with django-facebookconnect. If not, see <http://www.gnu.org/licenses/>.
from facebookconnect.models import FacebookProfile
from django.contrib import admin

# Expose FacebookProfile in the Django admin using the default ModelAdmin.
admin.site.register(FacebookProfile)
|
ryszard/django-facebookconnect
|
facebookconnect/admin.py
|
Python
|
gpl-3.0
| 1,007
|
[
"Brian"
] |
36d0769e1189a38803c3d07045fb0c88729fa392ebd32605b9eb03db7e68d104
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Main gui definition for admin application. """
import logging
import operator
import glib
import gtk
from stoqlib.api import api
from stoqlib.domain.invoice import InvoiceLayout
from stoqlib.gui.dialogs.clientcategorydialog import ClientCategoryDialog
from stoqlib.gui.dialogs.devices import DeviceSettingsDialog
from stoqlib.gui.editors.formfieldeditor import FormFieldEditor
from stoqlib.gui.dialogs.invoicedialog import (InvoiceLayoutDialog,
InvoicePrinterDialog)
from stoqlib.gui.dialogs.paymentcategorydialog import PaymentCategoryDialog
from stoqlib.gui.dialogs.paymentmethod import PaymentMethodsDialog
from stoqlib.gui.dialogs.personmergedialog import PersonMergeDialog
from stoqlib.gui.dialogs.pluginsdialog import PluginManagerDialog
from stoqlib.gui.dialogs.sellabledialog import SellableTaxConstantsDialog
from stoqlib.gui.dialogs.sintegradialog import SintegraDialog
from stoqlib.gui.editors.personeditor import UserEditor
from stoqlib.gui.search.costcentersearch import CostCenterSearch
from stoqlib.gui.search.eventsearch import EventSearch
from stoqlib.gui.search.fiscalsearch import CfopSearch, FiscalBookEntrySearch
from stoqlib.gui.search.parametersearch import ParameterSearch
from stoqlib.gui.search.gridsearch import (GridGroupSearch,
GridAttributeSearch)
from stoqlib.gui.search.personsearch import (ClientSearch,
EmployeeRoleSearch,
EmployeeSearch,
BranchSearch,
SupplierSearch,
TransporterSearch,
UserSearch)
from stoqlib.gui.search.profilesearch import UserProfileSearch
from stoqlib.gui.search.stationsearch import StationSearch
from stoqlib.gui.search.salesearch import SaleTokenSearch
from stoqlib.gui.search.taxclasssearch import TaxTemplatesSearch
from stoqlib.gui.stockicons import (
STOQ_CALC, STOQ_ADMIN_APP, STOQ_CLIENTS, STOQ_DEVICES, STOQ_DELIVERY,
STOQ_DOCUMENTS, STOQ_EDIT, STOQ_FORMS, STOQ_HR, STOQ_MONEY,
STOQ_PAYABLE_APP, STOQ_PLUGIN, STOQ_SUPPLIERS, STOQ_SYSTEM, STOQ_TAXES,
STOQ_USER_PROFILES, STOQ_USERS)
from stoqlib.gui.utils.keybindings import get_accels
from stoqlib.gui.wizards.personwizard import run_person_role_dialog
from stoqlib.lib.decorators import public
from stoqlib.lib.message import info
from stoqlib.lib.permissions import PermissionManager
from stoqlib.lib.translation import locale_sorted, stoqlib_gettext
from stoq.gui.shell.shellapp import ShellApp
_ = stoqlib_gettext
logger = logging.getLogger(__name__)
(COL_LABEL,
COL_NAME,
COL_PIXBUF) = range(3)
class Tasks(object):
def __init__(self, app):
    """Keep a reference to the owning app and grab the default GTK
    icon theme used to resolve icon names."""
    self.theme = gtk.icon_theme_get_default()
    self.app = app
def set_model(self, model):
    """Attach *model* as the task list store, emptying any stale rows."""
    model.clear()
    self.model = model
def add_defaults(self):
items = [
(_('Branches'), 'branches', gtk.STOCK_HOME),
(_('C.F.O.P.'), 'cfop', STOQ_CALC),
(_('Client Categories'), 'client_categories', STOQ_CLIENTS),
(_('Clients'), 'clients', STOQ_CLIENTS),
(_('Computers'), 'stations', STOQ_SYSTEM),
(_('Devices'), 'devices', STOQ_DEVICES),
(_('Employees'), 'employees', STOQ_ADMIN_APP),
(_('Events'), 'events', gtk.STOCK_DIALOG_WARNING),
(_('Fiscal Books'), 'fiscal_books', STOQ_EDIT),
(_('Forms'), 'forms', STOQ_FORMS),
(_('Invoice Printers'), 'invoice_printers', gtk.STOCK_PRINT),
(_('Parameters'), 'parameters', gtk.STOCK_PREFERENCES),
(_('Payment Categories'), 'payment_categories', STOQ_PAYABLE_APP),
(_('Payment Methods'), 'payment_methods', STOQ_MONEY),
(_('Plugins'), 'plugins', STOQ_PLUGIN),
(_('Roles'), 'employee_roles', STOQ_USERS),
(_('Taxes'), 'taxes', STOQ_TAXES),
(_('Suppliers'), 'suppliers', STOQ_SUPPLIERS),
(_('Tax Classes'), 'tax_templates', STOQ_DOCUMENTS),
(_('Transporters'), 'transporters', STOQ_DELIVERY),
(_('User Profiles'), 'user_profiles', STOQ_USER_PROFILES),
(_('Users'), 'users', STOQ_HR),
]
for label, name, pixbuf in locale_sorted(
items, key=operator.itemgetter(0)):
self.add_item(label, name, pixbuf)
@public(since="1.5.0")
def add_item(self, label, name, pixbuf=None, cb=None):
"""
@param label: Label to show in the interface
@param name: Name to use to store internally and use by callbacks
@param pixbuf: a pixbuf or stock-id/icon-name for the item
@param cb: callback
"""
if type(pixbuf) == str:
stock_id = pixbuf
try:
pixbuf = self.theme.load_icon(pixbuf, 32, 0)
except glib.GError:
pixbuf = self.app.get_toplevel().render_icon(pixbuf, gtk.ICON_SIZE_DIALOG)
if pixbuf is not None:
pixbuf.stock_id = stock_id
self.model.append([label, name, pixbuf])
if cb is not None:
setattr(self, '_open_' + name, cb)
def on_item_activated(self, icon_view, path):
name = self.model[path][COL_NAME]
self.run_task(name)
def hide_item(self, name):
for row in self.model:
if row[COL_NAME] == name:
del self.model[row.iter]
break
def run_task(self, name):
func = getattr(self, '_open_%s' % name, None)
if not func:
logger.info("Couldn't open dialog: %r" % (name, ))
return
logger.info("Opening dialog: %r" % (name, ))
func()
def _open_branches(self):
self.app.run_dialog(BranchSearch, self.app.store,
hide_footer=True)
def _open_clients(self):
self.app.run_dialog(ClientSearch, self.app.store)
def _open_client_categories(self):
store = api.new_store()
model = self.app.run_dialog(ClientCategoryDialog, store)
store.confirm(model)
store.close()
def _open_devices(self):
self.app.run_dialog(DeviceSettingsDialog, self.app.store)
def _open_employees(self):
self.app.run_dialog(EmployeeSearch, self.app.store)
def _open_employee_roles(self):
self.app.run_dialog(EmployeeRoleSearch, self.app.store)
def _open_events(self):
self.app.run_dialog(EventSearch, self.app.store)
def _open_forms(self):
store = api.new_store()
model = self.app.run_dialog(FormFieldEditor, store)
store.confirm(model)
store.close()
def _open_sale_token(self):
with api.new_store() as store:
self.app.run_dialog(SaleTokenSearch, store, hide_footer=True)
def _open_fiscal_books(self):
self.app.run_dialog(FiscalBookEntrySearch, self.app.store,
hide_footer=True)
def _open_invoice_layouts(self):
store = api.new_store()
model = self.app.run_dialog(InvoiceLayoutDialog, store)
store.confirm(model)
store.close()
def _open_invoice_printers(self):
if self.app.store.find(InvoiceLayout).is_empty():
info(_("You must create at least one invoice layout "
"before adding an invoice printer"))
return
store = api.new_store()
model = self.app.run_dialog(InvoicePrinterDialog, store)
store.confirm(model)
store.close()
def _open_payment_categories(self):
store = api.new_store()
model = self.app.run_dialog(PaymentCategoryDialog, store)
store.confirm(model)
store.close()
def _open_payment_methods(self):
store = api.new_store()
model = self.app.run_dialog(PaymentMethodsDialog, store)
store.confirm(model)
store.close()
def _open_parameters(self):
store = api.new_store()
model = self.app.run_dialog(ParameterSearch, store)
store.confirm(model)
store.close()
def _open_plugins(self):
store = api.new_store()
model = self.app.run_dialog(PluginManagerDialog, store)
store.confirm(model)
store.close()
def _open_cfop(self):
self.app.run_dialog(CfopSearch, self.app.store, hide_footer=True)
def _open_sintegra(self):
branch = api.get_current_branch(self.app.store)
if branch.manager is None:
info(_(
"You must define a manager to this branch before you can create"
" a sintegra archive"))
return
self.app.run_dialog(SintegraDialog, self.app.store)
def _open_stations(self):
self.app.run_dialog(StationSearch, self.app.store, hide_footer=True)
def _open_suppliers(self):
self.app.run_dialog(SupplierSearch, self.app.store)
def _open_taxes(self):
self.app.run_dialog(SellableTaxConstantsDialog, self.app.store)
def _open_tax_templates(self):
self.app.run_dialog(TaxTemplatesSearch, self.app.store)
def _open_transporters(self):
self.app.run_dialog(TransporterSearch, self.app.store)
def _open_users(self):
self.app.run_dialog(UserSearch, self.app.store)
def _open_user_profiles(self):
self.app.run_dialog(UserProfileSearch, self.app.store)
def _open_grid_group(self):
self.app.run_dialog(GridGroupSearch, self.app.store)
def _open_grid_attribute(self):
self.app.run_dialog(GridAttributeSearch, self.app.store)
def _open_ui_form(self):
self._open_forms()
def _open_token(self):
self._open_sale_token()
class AdminApp(ShellApp):
    """Administrative shell application.

    Presents configuration and search tasks (users, branches, taxes, ...)
    both as menu actions and as an icon view backed by :class:`Tasks`.
    """
    app_title = _('Administrative')
    gladefile = "admin"
    # Maps UI action names to the Tasks item they trigger (wired up in
    # create_ui); hiding an action also hides its task.
    ACTION_TASKS = {
        'SearchRole': 'employee_roles',
        'SearchEmployee': 'employees',
        'SearchFiscalBook': 'fiscal_books',
        'SearchCfop': 'cfop',
        'SearchUserProfile': 'user_profiles',
        'SearchUser': 'users',
        'SearchBranch': 'branches',
        'SearchComputer': 'stations',
        'SearchTaxTemplate': 'tax_templates',
        'SearchEvents': 'events',
        'ConfigureGridGroup': 'grid_group',
        'ConfigureGridAttribute': 'grid_attribute',
        'ConfigureDevices': 'devices',
        'ConfigurePaymentMethods': 'payment_methods',
        'ConfigurePaymentCategories': 'payment_categories',
        'ConfigureClientCategories': 'client_categories',
        'ConfigureTaxes': 'taxes',
        'ConfigureUIForm': 'ui_form',
        'ConfigureSaleToken': 'sale_token',
        'ConfigureSintegra': 'sintegra',
        'ConfigureParameters': 'parameters',
        'ConfigureInvoices': 'invoice_layouts',
        'ConfigureInvoicePrinters': 'invoice_printers',
        'ConfigurePlugins': 'plugins',
    }
    # Permission (domain key, mode) required for the matching action to be
    # available; consumed by the PermissionManager machinery.
    action_permissions = {
        'ConfigureInvoices': ('InvoiceLayout', PermissionManager.PERM_SEARCH),
        'ConfigureInvoicePrinters': ('InvoicePrinter', PermissionManager.PERM_SEARCH),
        'SearchTaxTemplate': ('ProductTaxTemplate', PermissionManager.PERM_SEARCH),
    }
    #
    # Application
    #
    def create_actions(self):
        # Register the menu/toolbar actions; accelerators come from the
        # 'app.admin' keybinding group.
        group = get_accels('app.admin')
        actions = [
            ("SearchRole", None, _("Roles..."),
             group.get('search_roles')),
            ("SearchEmployee", None, _("Employees..."),
             group.get('search_employees')),
            ("SearchEvents", None, _("Events..."),
             group.get('search_events')),
            ("SearchCostCenters", None, _("Cost Centers..."),
             group.get('search_cost_centers')),
            ("SearchDuplicatedPersons", None, _("Duplicated Persons..."),
             None),
            ("SearchCfop", None, _("C.F.O.P..."),
             group.get('search_cfop')),
            ("SearchFiscalBook", None, _("Fiscal books..."),
             group.get('search_fiscalbook')),
            ("SearchUserProfile", None, _("Profiles..."),
             group.get('search_profile')),
            ("SearchUser", None, _("Users..."),
             group.get('search_users')),
            ("SearchBranch", None, _("Branches..."),
             group.get('search_branches')),
            ("SearchComputer", None, _('Computers...'),
             group.get('search_computers')),
            ("SearchTaxTemplate", None, _('Tax Classes...')),
            ("ConfigureMenu", None, _("_Configure")),
            ("ConfigureDevices", None, _("Devices..."),
             group.get('config_devices')),
            ("ConfigureGridGroup", None, _("Attribute Group...")),
            ("ConfigureGridAttribute", None, _("Grid Attribute...")),
            ("ConfigurePaymentMethods", None, _("Payment methods..."),
             group.get('config_payment_methods')),
            ("ConfigurePaymentCategories", None, _("Payment categories..."),
             group.get('config_payment_categories')),
            ("ConfigureClientCategories", None, _("Client categories..."),
             group.get('config_client_categories')),
            ("ConfigureInvoices", None, _("Invoices..."),
             group.get('config_invoices')),
            ("ConfigureInvoicePrinters", None, _("Invoice printers..."),
             group.get('config_invoice_printers')),
            ("ConfigureSintegra", None, _("Sintegra..."),
             group.get('config_sintegra')),
            ("ConfigurePlugins", None, _("Plugins...")),
            ("ConfigureUIForm", None, _("Forms...")),
            ("ConfigureTaxes", None, _("Taxes..."),
             group.get('config_taxes')),
            ("ConfigureSaleToken", None, _("Sale tokens...")),
            ("ConfigureParameters", None, _("Parameters..."),
             group.get('config_parameters')),
            ("NewUser", None, _("User..."), '',
             _("Create a new user")),
        ]
        self.admin_ui = self.add_ui_actions('', actions,
                                            filename='admin.xml')
        self.set_help_section(_("Admin help"), 'app-admin')
    def create_ui(self):
        # Build the task icon view and wire actions to tasks.
        self.tasks = Tasks(self)
        self.tasks.set_model(self.model)
        self.tasks.add_defaults()
        self.model.set_sort_column_id(COL_LABEL, gtk.SORT_ASCENDING)
        self.iconview.set_text_column(COL_LABEL)
        self.iconview.set_pixbuf_column(COL_PIXBUF)
        self.iconview.connect('item-activated', self.tasks.on_item_activated)
        self.iconview.select_path(self.model[0].path)
        # Connect related actions and tasks and hide task if action is hidden
        for action_name, task in self.ACTION_TASKS.items():
            action = getattr(self, action_name)
            action.connect('activate', self._on_action__activate, task)
            if not action.get_visible():
                self.tasks.hide_item(task)
    def activate(self, refresh=True):
        # Admin app doesn't have anything to print/export
        for widget in [self.window.Print,
                       self.window.ExportSpreadSheet]:
            widget.set_visible(False)
        self.window.add_new_items([self.NewUser])
        self.window.add_search_items([self.SearchUser,
                                      self.SearchEmployee])
        self.window.NewToolItem.set_tooltip(
            _("Create a new user"))
        self.window.SearchToolItem.set_tooltip(
            _("Search for users"))
    def deactivate(self):
        # Undo create_actions: drop this app's UI from the window.
        self.uimanager.remove_ui(self.admin_ui)
    def setup_focus(self):
        self.iconview.grab_focus()
    def new_activate(self):
        # Toolbar "New" button: creates a user.
        self._new_user()
    def search_activate(self):
        # Toolbar "Search" button: opens the user search task.
        self.tasks.run_task('users')
    # Private
    def _new_user(self):
        # Open the user editor in its own store and commit on confirm.
        store = api.new_store()
        model = run_person_role_dialog(UserEditor, self, store)
        store.confirm(model)
        store.close()
    #
    # Callbacks
    #
    def _on_action__activate(self, action, task):
        self.tasks.run_task(task)
    # New
    def on_NewUser__activate(self, action):
        self._new_user()
    # TODO: Create an task for this. I still need to find a proper metaphor
    # for the icon
    def on_SearchCostCenters__activate(self, action):
        self.run_dialog(CostCenterSearch, self.store)
    def on_SearchDuplicatedPersons__activate(self, action):
        self.run_dialog(PersonMergeDialog, self.store)
|
tiagocardosos/stoq
|
stoq/gui/admin.py
|
Python
|
gpl-2.0
| 17,432
|
[
"VisIt"
] |
04bb4fdbe91afbfc7d726aaf55e31066b43476a1b24a2db8990b1c8e64dc4653
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
"""Module with utility classes and functions related
to data tables and text.
"""
import sys
import re
from psi4 import core
from psi4.driver import constants
from .exceptions import *
class Table(object):
    """Class defining a flexible Table object for storing data.

    Row labels are accumulated via chained indexing (``t[a][b] = value``)
    and each assignment stores one data row; ``str(t)`` renders the whole
    table with fixed-width columns.
    """

    def __init__(self, rows=(),
                 row_label_width=10,
                 row_label_precision=4,
                 cols=(),
                 width=16, precision=10):
        # Field widths/precision used when formatting labels and values.
        self.row_label_width = row_label_width
        self.row_label_precision = row_label_precision
        self.width = width
        self.precision = precision
        self.rows = rows
        # Accept a single column name given as a plain string.
        if isinstance(cols, str):
            self.cols = (cols,)
        else:
            self.cols = cols
        self.labels = []   # label parts accumulated by __getitem__
        self.data = []     # list of (formatted_label, [values]) pairs

    def format_label(self):
        """Function to pad the width of Table object labels."""
        # Named formatter instead of shadowing the builtin `str`.
        fmt = '%%%ds' % self.row_label_width
        return " ".join(fmt % label for label in self.labels)

    def format_values(self, values):
        """Function to pad the width of Table object data cells."""
        fmt = '%%%d.%df' % (self.width, self.precision)
        return " ".join(fmt % value for value in values)

    def __getitem__(self, value):
        # Chained indexing collects label parts; __setitem__ flushes them.
        self.labels.append(value)
        return self

    def __setitem__(self, name, value):
        self.labels.append(name)
        label = self.format_label()
        self.labels = []
        # Store scalars as single-element rows for uniform handling.
        if isinstance(value, list):
            self.data.append((label, value))
        else:
            self.data.append((label, [value]))

    def save(self, file):
        """Function to save string of the Table object to *file*."""
        # The previous implementation also pickled self into an unused
        # local; only the text representation was (and is) written out.
        # Context manager guarantees the handle is closed.
        with open(file, "w") as fileobj:
            fileobj.write(str(self))

    def __str__(self):
        rowfmt = lambda x: '%%%ds' % self.row_label_width % x
        colfmt = lambda x: '%%%ds' % self.width % x
        lines = []
        table_header = ""
        if isinstance(self.rows, str):
            table_header += "%%%ds" % self.row_label_width % self.rows
        else:
            table_header += " ".join(map(rowfmt, self.rows))
        # NOTE: no separator between the row-label header and the column
        # headers, matching the historical output format.
        table_header += " ".join(map(colfmt, self.cols))
        lines.append(table_header)
        for datarow in self.data:
            row_data = datarow[0]
            row_data += self.format_values(datarow[1])
            lines.append(row_data)
        return "\n".join(lines) + "\n"

    def copy(self):
        """Function to return a copy of the Table object."""
        import copy
        return copy.deepcopy(self)

    def absolute_to_relative(self, Factor=constants.hartree2kcalmol):
        """Function to shift the data of each column of the Table object
        such that the lowest value is zero. A scaling factor of *Factor*
        is applied.
        """
        import copy
        if len(self.data) == 0:
            return
        # Column-wise minimum over all stored rows.
        current_min = list(copy.deepcopy(self.data[0][1]))
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                if current_min[col] > datarow[1][col]:
                    current_min[col] = datarow[1][col]
        # Shift each column to zero at its minimum, then scale (in place).
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                datarow[1][col] = (datarow[1][col] - current_min[col]) * Factor

    def scale(self, Factor=constants.hartree2kcalmol):
        """Function to apply a scaling factor *Factor* to the
        data of the Table object (in place).
        """
        if len(self.data) == 0:
            return
        for datarow in self.data:
            for col in range(0, len(datarow[1])):
                datarow[1][col] = datarow[1][col] * Factor
def banner(text, type=1, width=35, strNotOutfile=False):
    """Function to print *text* to output file in a banner of
    minimum width *width* and minimum three-line height for
    *type* = 1 or one-line height for *type* = 2. If *strNotOutfile*
    is True, function returns string rather than printing it
    to output file.
    """
    lines = text.split('\n')
    # Banner body must be at least *width* wide and fit the longest line.
    max_length = max([width] + [len(line) for line in lines])
    if type == 1:
        # Three-part banner: '>' rule, centered text lines, '<' rule.
        pieces = [' //' + '>' * max_length + '//\n']
        for line in lines:
            pieces.append(' //' + line.center(max_length) + '//\n')
        pieces.append(' //' + '<' * max_length + '//\n')
        banner = ''.join(pieces)
    if type == 2:
        # One-line banner per input line, padded with '='.
        banner = ''.join(
            (' ' + line + ' ').center(max_length, '=') for line in lines)
    if strNotOutfile:
        return banner
    else:
        core.print_out(banner)
def print_stdout(stuff):
    """Write *stuff* followed by a newline to the standard output stream."""
    sys.stdout.write('%s\n' % (stuff,))
def print_stderr(stuff):
    """Write *stuff* followed by a newline to the standard error stream."""
    sys.stderr.write('%s\n' % (stuff,))
def levenshtein(seq1, seq2):
    """Function to compute the Levenshtein distance between two strings.

    Classic two-row dynamic program: ``prev[j]`` holds the edit distance
    between the first ``i-1`` characters of *seq1* and the first ``j``
    characters of *seq2*.
    """
    prev = list(range(len(seq2) + 1))
    for i, c1 in enumerate(seq1, 1):
        cur = [i]
        for j, c2 in enumerate(seq2, 1):
            deletion = prev[j] + 1
            insertion = cur[j - 1] + 1
            substitution = prev[j - 1] + (c1 != c2)
            cur.append(min(deletion, insertion, substitution))
        prev = cur
    return prev[len(seq2)]
def find_approximate_string_matches(seq1, options, max_distance):
    """Function to compute approximate string matches from a list of options.

    Returns the entries of *options* (in their original order) whose
    Levenshtein distance from *seq1* does not exceed *max_distance*.
    """
    return [candidate for candidate in options
            if levenshtein(seq1, candidate) <= max_distance]
|
andysim/psi4
|
psi4/driver/p4util/text.py
|
Python
|
gpl-2.0
| 7,028
|
[
"Psi4"
] |
3226109478237391ec11e57ea28d3647b70d9582431dddba733b9ced22f10e5e
|
#!/usr/bin/env python
# encoding: utf-8
# ======================================================================
# Package: pyclaw.plotters
# File: data.py
# Created: Aug 7, 2009
# Author: R.J. LeVeque
# ============================================================================
"""
Plotting Data Module
Contains the general class definition and the subclasses of the Clawpack
data objects specific to plotting.
"""
import os
import copy
import re
import logging
from pyclaw.data import Data
# ============================================================================
# Subclass ClawPlotData containing data for plotting results
# ============================================================================
class ClawPlotData(Data):
"""ClawPlotData class
Data subclass containing plot data.
"""
# ========== Initialization routine ======================================
def __init__(self, data_files=[], controller=None):
"""Initialize a PlotData object
Accepts a list of data_files to be read into and instantiate into one
ClawPlotData object. An empty object can be created by not passing
anything in for the data_file_list
"""
plot_attrs = ['rundir','plotdir','outdir','overwrite','plotter',
'msgfile','printfig_format','afterframe',
'beforeframe','mapc2p',
'html_framenos','html_fignos','html_fignames',
'ndim','clear_figs', 'setplot', 'eagle',
'plotitem_dict', 'html_movies', 'params']
# Initialize the data object and read the data files
super(ClawPlotData,self).__init__(data_files,plot_attrs)
# default values of attributes:
if controller:
controller.plotdata = self
# inherit some values from controller
self.rundir = copy.copy(controller.rundir)
self.outdir = copy.copy(controller.outdir)
else:
self.rundir = os.getcwd() # uses *.data from rundir
self.outdir = os.getcwd() # where to find fort.* files
self.format = 'ascii'
self.plotdir = os.getcwd() # directory for plots *.png, *.html
self.overwrite = True # ok to overwrite old plotdir?
self.plotter = 'matplotlib' # backend for plots
self.msgfile = '' # where to write error messages
self.verbose = True # verbose output?
self.ion = False # call ion() or ioff()?
self.user = Data() # for user to pass things into
# afterframe, for example
# Deprecated.
self.printfigs = True
self.print_format = 'png'
self.print_framenos = 'all' # which frames to plot
self.print_gaugenos = 'all' # which gauges to plot
self.print_fignos = 'all' # which figures to plot each frame
self.iplotclaw_fignos = 'all' # which figures to plot interactively
self.latex = True # make latex files for figures
self.latex_fname = 'plots' # name of latex file
self.latex_title = 'Clawpack Results'
self.latex_framesperpage = 'all' # number of frames on each page
self.latex_framesperline = 2 # number of frames on each line
self.latex_figsperline = 'all' # number of figures on each line
self.latex_makepdf = False # run pdflatex on latex file
self.html = True # make html files for figures
self.html_index_fname = '_PlotIndex.html' # name of html index file
self.html_index_title = 'Plot Index' # title at top of index page
self.html_homelink = None # link to here from top of _PlotIndex.html
self.html_movie = True # make html with java script for movie
self.html_eagle = False # use EagleClaw titles on html pages?
self.gif_movie = False # make animated gif movie of frames
# self.clear_figs = True # give clf() command in each figure
# before plotting each frame
self.setplot = False # Execute setplot.py in plot routine
# self.setplot_caller = None # Set before calling setplot
self.mapc2p = None # function to map computational
# points to physical
self.beforeframe = None # function called before all plots
# in each frame are done
self.afterframe = None # function called after all plots
# in each frame are done
self.plotfigure_dict = {}
self.otherfigure_dict = {}
self.framesoln_dict = {} # dictionary for holding framesoln
# objects associated with plots
self.gaugesoln_dict = {} # dictionary for holding gaugesoln
# objects associated with plots
self.save_frames = True # True ==> Keep a copy of any frame
# read in. False ==> Clear the frame
# solution dictionary before adding
# another solution
self.save_figures = True # True ==> Keep a copy of and figure
# created. False ==> Clear the
# figure dictionary before adding
# another solution
self.refresh_frames = False # False ==> don't re-read framesoln if
# already in framesoln_dict
self.refresh_gauges = False # False ==> don't re-read gaugesoln if
# already in gaugesoln_dict
self._next_FIG = 1000
self._fignames = []
self._fignos = []
self._mode = 'unknown'
self._figname_from_num = {}
self._otherfignames = []
#if data_file_list is not None:
if len(data_files) > 0:
# values in data files may overwrite some default values
# or set parameter values in params dictionary
for data_file in data_files:
self.read(data_file)
def new_plotfigure(self, name=None, figno=None, type='each_frame'):
"""
Create a new figure for Clawpack plots.
If type='each_frame' it is a figure that will be plotted
for each time frame.
If type='multi_frame' it is a figure that will be plotted based on
all the frames, such as x-t plots or time series. (Not yet implemented)
"""
if (self._mode != 'iplotclaw') and (name in self._fignames):
print '*** Warning, figure named %s has already been created' % name
if (self._mode != 'iplotclaw') and (figno in self._fignos):
print '*** Warning, figure number %s has already been created' % figno
if figno is None:
self._next_FIG += 1
figno = self._next_FIG
if name is None:
name = "FIG%s" % figno
if name in self._fignames:
print "*** Error in new_plotfigure: Figure name already used... ",name
raise Exception("Figure name already used")
elif figno in self._fignos:
print "*** Error in new_plotfigure: Figure number already used... ",figno
raise Exception("Figure number already used")
self._fignames.append(name)
self._fignos.append(figno)
plotfigure = ClawPlotFigure(name, figno, type, self)
if not self.save_figures:
self.plotfigure_dict.clear()
self.plotfigure_dict[name] = plotfigure
self._figname_from_num[figno] = name
return plotfigure
def getframe(self,frameno,outdir=None):
"""
ClawPlotData.getframe:
Return an object of class Solution containing the solution
for frame number frameno.
If self.refresh_frames == True then this frame is read from the fort
files, otherwise it is read from the fort files only if the
the dictionary self.framesoln_dict has no key frameno. If it does, the
frame has previously been read and the dictionary value is returned.
"""
from pyclaw import solution
framesoln_dict = self.framesoln_dict
if 0:
if outdir:
key = (frameno, outdir)
else:
key = frameno
outdir = self.outdir
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
key = (frameno, outdir)
if self.refresh_frames or (not framesoln_dict.has_key(key)):
thisdir = os.getcwd()
try:
os.chdir(outdir)
except:
print '*** Error in getframe: cannot move to outdir = ',\
outdir
print '*** thisdir = ',thisdir
raise
return
try:
framesoln = solution.Solution(frameno,format=self.format)
except:
print '*** Error reading frame in ClawPlotData.getframe'
os.chdir(thisdir)
raise
return
os.chdir(thisdir)
if not self.save_frames:
framesoln_dict.clear()
framesoln_dict[key] = framesoln
if key != frameno:
print ' Reading Frame %s at t = %g from outdir = %s' \
% (frameno,framesoln.t,outdir)
else:
print ' Reading Frame %s at t = %g ' \
% (frameno,framesoln.t)
else:
framesoln = self.framesoln_dict[key]
return framesoln
def gettime(self,frameno,outdir='./'):
r"""Fetch time from solution corresponding to frame number in outdir
This method only works for ascii and netcdf formatted files
"""
if self.format=='ascii':
from pyclaw.io.ascii import read_ascii_t
t,meqn,ngrids,maux,ndim = read_ascii_t(frameno,path=outdir)
return t
elif self.format=='netcdf':
from pyclaw.io.netcdf import read_netcdf_t
t=read_netcdf_t(frameno,path=outdir)
return t
def clearfigures(self):
"""
Clear all plot parameters specifying figures, axes, items.
Does not clear the frames of solution data already read in.
For that use clearframes.
"""
self.plotfigure_dict.clear()
self._fignames = []
self.otherfigure_dict.clear()
self._otherfignames = []
self._fignos = []
self._next_FIG = 1000
def clearframes(self, framenos='all'):
"""
Clear one or more frames from self.framesoln_dict.
Need to add outdir option!
"""
if isinstance(framenos, int):
framenos = [framenos] # turn into a list
if framenos=='all':
self.framesoln_dict.clear()
print 'Cleared all frames'
else:
for frameno in framenos:
xxx = self.plotdata.framesoln_dict.pop(frameno,None)
if xxx is None:
print 'No frame data to clear for frame ',frameno
else:
print 'Cleared data for frame ',frameno
def getgauge(self, gaugeno, outdir=None):
"""
ClawPlotData.getgauge:
Return an object of class GaugeSolution containing the solution
for gauge number gaugeno.
If self.refresh_gauges == True then this gauge is read from the
fort.gauge file, otherwise it is read only if the
the dictionary self.gaugesoln_dict has no key gaugeno. If it does, the
gauge has previously been read and the dictionary value is returned.
"""
gaugesoln_dict = self.gaugesoln_dict
if outdir is None:
outdir = self.outdir
outdir = os.path.abspath(outdir)
key = (gaugeno, outdir)
if self.refresh_gauges or (not gaugesoln_dict.has_key(key)):
thisdir = os.getcwd()
try:
os.chdir(outdir)
except:
print '*** Error in getgauge: cannot move to outdir = ',\
outdir
print '*** thisdir = ',thisdir
raise
return
try:
gauges = self.read_gauges(outdir)
except:
print '*** Error reading gauges in ClawPlotData.getgauge'
print '*** outdir = ', outdir
print '*** thisdir = ', thisdir
os.chdir(thisdir)
raise
return
os.chdir(thisdir)
try:
for (k,v) in gauges.iteritems():
gaugesoln_dict[(k, outdir)] = v
except:
raise("*** Problem setting gaugesoln_dict in getgauge")
#print ' Read all gauge data from %s/fort.gauge' % outdir
try:
gaugesoln = gaugesoln_dict[key]
except:
print "*** Cannot find key = ",key
print "*** in gaugesoln_dict = ",gaugesoln_dict
raise("*** Problem getting gaugesoln in getgauge")
return gaugesoln
def read_gauges(self, outdir='.'):
"""
Read the gauge output in file fort.gauge in the directory specified by
outdir.
Returns a dictionary *gauges* with an entry for each gauge number.
Each entry is an object of class GaugeSolution
"""
import os
import numpy as np
from matplotlib.mlab import find
from pyclaw.plotters import gaugetools
from StringIO import StringIO
fname = outdir + '/fort.gauge'
if not os.path.isfile(fname):
print "*** Gauge file not found: ",fname
gauges = {}
print ' Reading gauge data from ',fname
def stars2num(s):
"""
Converter to us in case gauge number was too long and
Fortran printed stars instead of the number.
"""
if s[0]=='*':
gaugeno = 99999
else:
gaugeno = int(s)
return gaugeno
try:
gdata = np.loadtxt(fname,converters={0:stars2num})
except:
try:
print "*** Warning: incomplete last line, computation may "
print "*** still be in progress "
gdata_lines = open(fname,'r').read()
gdata_end = gdata_lines.rfind('\n',-200,-1)
gdata_file = StringIO(gdata_lines[:gdata_end+1])
gdata = np.loadtxt(gdata_file,converters={0:stars2num})
except:
print "*** Problem reading file ",fname
#print "*** Possibly an incomplete last line if computation is still in progress"
raise Exception("Problem reading fort.gauge")
gauges = {}
gaugeno = np.array(gdata[:,0], dtype=int)
level = np.array(gdata[:,1], dtype=int)
t = gdata[:,2]
q = gdata[:,3:] # all remaining columns are stored in q
setgauges = gaugetools.read_setgauges(datadir=outdir)
gauges = {}
gaugenos = set(gaugeno) # reduces to unique elements
for n in gaugenos:
n = int(n)
gauges[n] = GaugeSolution()
gauges[n].gaugeno = n
nn = find(gaugeno==n)
gauges[n].level = level[nn]
gauges[n].t = t[nn]
gauges[n].q = q[nn,:]
# Locations:
try:
gauges[n].x = setgauges.x[n]
gauges[n].y = setgauges.y[n]
gauges[n].t1 = setgauges.t1[n]
gauges[n].t2 = setgauges.t2[n]
except:
print "*** Could not extract gauge locations for gaugeno = ",n
print ' Found gauge numbers: ',gauges.keys()
return gauges
def plotframe(self, frameno):
from pyclaw.plotters import frametools
frametools.plotframe(frameno, self)
def printframes(self, verbose=True):
#from pyclaw.plotters import frametools
#frametools.printframes(self, verbose)
print "*** printframes is deprecated. Use plotpages.plotclaw_driver"
print "*** for added capabilities."
def fignos(self):
"""
Return a list of the figure numbers actually used.
Useful in afterframe function for example to loop over all
figures and do something.
"""
return self._fignos
def mode(self):
"""
Return self._mode, which is set internally to
'iplotclaw' if Iplotclaw is in use,
'printframes' if printframes is being used
Useful in afterframe function if you want to do different things
for interactive or print modes.
"""
return self._mode
def iplotclaw(self):
"""
Return True if interactive plotting with iplotclaw is being done.
"""
return (self._mode == 'iplotclaw')
def getfigure(self,figname):
try:
plotfigure = self.plotfigure_dict[figname]
except:
print '*** Error accessing plotfigure_dict[%s]' % figname
return None
return plotfigure
def getaxes(self,axesname,figname=None):
found = True
if not figname:
found = False
for fig in self._fignames:
plotfigure = self.getfigure(fig)
if axesname in plotfigure._axesnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname'
print ' try getaxes(axesname, figname)'
return None
figname = fig
found = True
if not found:
print '*** No axes found with name = ',axesname
return None
try:
plotfigure = self.getfigure(figname)
plotaxes = plotfigure.plotaxes_dict[axesname]
except:
print '*** Error accessing plotaxes[%s]' % axesname
print '*** figname = %s' % figname
return None
return plotaxes
def getitem(self,itemname,axesname=None,figname=None):
found = True
if not figname:
# search over all figures looking for the item
found = False
for fign in self._fignames:
plotfigure = self.getfigure(fign)
if not axesname:
# search over all axes looking for the item
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname and/or axesname'
print ' try getitem(itemname, axesname, figname)'
return None
axesname = axesn
figname = fign
found = True
else:
# axesname was specified (but not figname)
plotaxes = self.getaxes(axesname,fign)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify figname and/or axesname'
print ' try getitem(itemname, axesname, figname)'
return None
figname = fign
found = True
elif not axesname:
# figname was specified but not axesname.
# search over all axes looking for the item
found = False
plotfigure = self.getfigure(figname)
for axesn in plotfigure._axesnames:
plotaxes = self.getaxes(axesn,figname)
if itemname in plotaxes._itemnames:
if found == True: # already found!
print '*** Ambiguous... must specify axesname'
print ' try getitem(itemname, axesname, figname)'
return None
axesname = axesn
found = True
if not found:
print '*** No item found with name = ',itemname
return None
try:
plotaxes = self.getaxes(axesname,figname)
plotitem = plotaxes.plotitem_dict[itemname]
except:
print '*** Error accessing plotitem[%s]' % itemname
print '*** figname = ',figname
print '*** axesname = ',axesname
return None
return plotitem
def showitems(self):
    """
    Print a human-readable summary of every figure, its axes, and the
    items on each axes, marking objects whose _show flag is off.
    """
    fignames = self._fignames
    print "\n\nCurrent plot figures, axes, and items:"
    print "---------------------------------------"
    for figname in fignames:
        plotfigure = self.getfigure(figname)
        s = " figname = %s, figno = %s" % (figname, plotfigure.figno)
        if not plotfigure._show:
            s = s + " [Not showing]"
        print s
        axesnames = plotfigure._axesnames
        for axesname in axesnames:
            plotaxes = self.getaxes(axesname,figname)
            s = " axesname = %s, axescmd = %s" \
                % (axesname, plotaxes.axescmd)
            if not plotaxes._show:
                s = s + " [Not showing]"
            print s
            for itemname in plotaxes._itemnames:
                plotitem = self.getitem(itemname,axesname,figname)
                plot_type = plotitem.plot_type
                s = " itemname = %s, plot_type = %s" \
                    % (itemname,plot_type)
                if not plotitem._show:
                    s = s + " [Not showing]"
                print s
    print " "
def getq(self,frameno):
    """
    Return the solution array q for frame number frameno.

    Only grid 0's data is returned; a warning is printed when the frame
    contains more than one grid (e.g. an AMR run).
    """
    solution = self.getframe(frameno)
    grids = solution.grids
    if len(grids) > 1:
        print '*** Warning: more than 1 grid, q on grid[0] is returned'
    q = grids[0].q
    return q
def new_otherfigure(self, name=None):
    """
    Create a new figure for Clawpack plots.
    For figures not repeated each frame.

    name must be supplied and not already registered; the new
    ClawOtherFigure is stored in self.otherfigure_dict and returned.
    Raises Exception when name is None or already in use.
    """
    # NOTE(review): this membership check runs before the name-is-None
    # check below, so None is first tested against _fignames -- confirm
    # whether the two checks were meant to be in the opposite order.
    if (self._mode != 'iplotclaw') and (name in self._fignames):
        print '*** Warning, figure named %s has already been created' % name
    if name is None:
        raise Exception("Need to provide name in new_otherfigure")
    if name in self._otherfignames:
        print "*** Error in new_otherfigure: Figure name already used... ",name
        raise Exception("Figure name already used")
    self._otherfignames.append(name)
    otherfigure = ClawOtherFigure(name,self)
    self.otherfigure_dict[name] = otherfigure
    return otherfigure
# ============================================================================
# Subclass ClawPlotFigure containing data for plotting a figure
# ============================================================================
class ClawPlotFigure(Data):
    """
    Data subclass containing plot data needed to plot a single figure.
    This may consist of several ClawPlotAxes objects.
    """

    def __init__(self, name, figno, type, plotdata):
        """
        Initialize a ClawPlotFigure object

        name     : key under which this figure is registered in plotdata
        figno    : figure number handed to the plotting backend
        type     : 'each_frame', 'each_run' or 'each_gauge'
        plotdata : parent ClawPlotData object
        """
        attributes = ['name','figno','_plotdata','clf','plotaxes_dict', \
                      '_axesnames','show','_show','kwargs','_handle',\
                      '_type']
        super(ClawPlotFigure, self).__init__(attributes = attributes)
        self._plotdata = plotdata           # parent ClawPlotData object
        self.name = name
        self.figno = figno
        self.kwargs = {}
        self.clf_each_frame = True          # clear the figure before each frame
        self.clf_each_gauge = True          # clear the figure before each gauge
        self._axesnames = []
        self.show = True
        self._show = True
        self.plotaxes_dict = {}
        self.type = type   # = 'each_frame' or 'each_run' or 'each_gauge'
        self._next_AXES = 0                 # counter for auto-generated axes names

    def new_plotaxes(self, name=None, type='each_frame'):
        """
        Create a new axes that will be plotted in this figure.
        If type='each_frame' it is an axes that will be plotted
        for each time frame.
        If type='multi_frame' it is an axes that will be plotted based on
        all the frames, such as x-t plots or time series. (Not yet implemented)
        If type='empty' it is created without doing any plots using the
        pyclaw tools. Presumably the user will create a plot within an
        afteraxes command, for example.
        """
        if name is None:
            # auto-generate a unique name: AXES1, AXES2, ...
            self._next_AXES += 1
            name = "AXES%s" % self._next_AXES
        if name in self._axesnames:
            print '*** Warning, axes named %s has already been created' % name
        if name not in self._axesnames:
            self._axesnames.append(name)
        plotaxes = ClawPlotAxes(name, self)
        self.plotaxes_dict[name] = plotaxes
        plotaxes.type = type
        return plotaxes

    def gethandle(self):
        """Return the backend figure handle, or None if not yet set."""
        _handle = getattr(self,'_handle',None)
        return _handle
# ============================================================================
# Subclass ClawPlotAxes containing data for plotting axes within a figure
# ============================================================================
class ClawPlotAxes(Data):
    """
    Data subclass containing plot data needed to plot a single axes.
    This may consist of several ClawPlotItem objects.

    Fix relative to the previous revision: the accessors get_plotdata,
    get_plotfigure and gethandle computed getattr(..., None) into a local
    and then ignored it, re-reading the attribute directly -- so a missing
    attribute raised AttributeError instead of returning None.  They now
    return the getattr result, matching ClawPlotFigure.gethandle.
    """

    def __init__(self, name, plotfigure):
        """
        Initialize a ClawPlotAxes object

        name       : key for this axes in plotfigure.plotaxes_dict
        plotfigure : parent ClawPlotFigure object
        """
        attributes = ['name','type','figno','plotdata','plotfigure','title',\
                      'axescmd','xlimits','ylimits','plotitem_dict', 'user',\
                      'afteraxes','_itemnames','show','_show','_handle', \
                      '_plotfigure','_plotdata', 'scaled']
        super(ClawPlotAxes, self).__init__(attributes = attributes)
        self._plotfigure = plotfigure           # figure this axes is on
        self._plotdata = plotfigure._plotdata   # parent ClawPlotData object
        self.name = name
        self.title = name
        self.title_with_t = True     # creates title of form 'title at time t = ...'
        self.axescmd = 'subplot(1,1,1)'
        self.user = Data()           # for user to pass things into
                                     # afteraxes, for example
                                     # Deprecated.
        self.afteraxes = None
        self.xlimits = None
        self.ylimits = None
        self.scaled = False          # True => x- and y-axis scaled the same
        self.plotitem_dict = {}
        self.type = 'each_frame'
        self._itemnames = []
        self.show = True
        self._show = True
        self._handle = None
        self._next_ITEM = 0          # counter for auto-generated item names
        self.figno = self._plotfigure.figno

    def new_plotitem(self, name=None, plot_type=None):
        """Create, register, and return a new ClawPlotItem on this axes."""
        if name is None:
            self._next_ITEM += 1
            name = "ITEM%s" % self._next_ITEM
        if name not in self._itemnames:
            self._itemnames.append(name)
        plotitem = ClawPlotItem(name, plot_type, plotaxes=self)
        self.plotitem_dict[name] = plotitem
        return plotitem

    def get_plotdata(self):
        """Return the parent ClawPlotData object, or None if unset."""
        return getattr(self, '_plotdata', None)

    def get_plotfigure(self):
        """Return the parent ClawPlotFigure object, or None if unset."""
        return getattr(self, '_plotfigure', None)

    def gethandle(self):
        """Return the backend axes handle, or None if unset."""
        return getattr(self, '_handle', None)
# ============================================================================
# Subclass ClawPlotItem containing data for plotting a single object
# ============================================================================
class ClawPlotItem(Data):
    """
    Data subclass containing plot data needed to plot a single object.
    This may be a single curve, set of points, contour plot, etc.
    """

    def __init__(self, name, plot_type, plotaxes):
        """
        Initialize a ClawPlotItem object

        name      : key for this item in plotaxes.plotitem_dict
        plot_type : string such as '1d_plot' or '2d_pcolor'; the first
                    character is parsed as the number of dimensions
        plotaxes  : parent ClawPlotAxes object
        """
        attributes = ['ndim','outdir','refresh_frames',\
                      'plot_var','plot_var_title', \
                      'MappedGrid', 'mapc2p', \
                      'figno', 'handle', 'params', \
                      'aftergrid','afteritem','framesoln_dict', \
                      '_pobjs']
        super(ClawPlotItem, self).__init__(attributes = attributes)
        self._plotaxes = plotaxes                       # axes this item is on
        self._plotfigure = plotaxes._plotfigure         # figure this item is on
        self._plotdata = plotaxes._plotfigure._plotdata # parent ClawPlotData object
        try:
            ndim = int(plot_type[0]) # first character of plot_type should be ndim
        except:
            print '*** Error: could not determine ndim from plot_type = ',plot_type
            # NOTE(review): if the parse failed, ndim is unbound here and
            # the assignment below raises NameError -- confirm whether an
            # explicit raise was intended instead.
        self.ndim = ndim
        self.name = name
        self.figno = plotaxes.figno
        self.outdir = None             # indicates data comes from
                                       #   self._plotdata.outdir
        self.plot_type = plot_type
        self.plot_var = 0
        self.plot_show = True
        self.MappedGrid = None         # False to plot on comput. grid even
                                       # if _plotdata.mapc2p is not None.
        self.mapc2p = None             # function to map computational
                                       # points to physical (over-rides
                                       # plotdata.mapc2p if set for item
        self.aftergrid = None          # function called after each grid is
                                       # plotted within each single plotitem.
        self.afteritem = None          # function called after the item is
                                       # plotted for each frame
        self.user = Data()             # for user to pass things into
                                       # aftergrid, for example
                                       # Deprecated.
        self.show = True               # False => suppress showing this item
        self._show = True              # Internal
        self._current_pobj = None

        if ndim == 1:
            # defaults for 1-dimensional line plots:
            self.add_attribute('plotstyle','-')
            self.add_attribute('color',None)
            self.add_attribute('kwargs',{})
            if plot_type == '1d_fill_between':
                zero_function = lambda current_data: 0.
                self.add_attribute('plot_var2',zero_function)
                self.add_attribute('fill_where',None)
            if plot_type == '1d_from_2d_data':
                self.add_attribute('map2d_to_1d',None)
        elif ndim == 2:
            # default values specifying this single plot:
            self.add_attribute('plot_type',plot_type)
            self.add_attribute('gridlines_show',0)
            self.add_attribute('gridlines_color','k')
            self.add_attribute('grid_bgcolor','w')
            self.add_attribute('gridedges_show',0)
            self.add_attribute('gridedges_color','k')
            self.add_attribute('kwargs',{})
            if plot_type == '2d_pcolor':
                # from pylab import cm
                # self.add_attribute('pcolor_cmap',cm.RdYlBu,True)
                from pyclaw.plotters import colormaps
                self.add_attribute('pcolor_cmap',colormaps.yellow_red_blue)
                self.add_attribute('pcolor_cmin',None)
                self.add_attribute('pcolor_cmax',None)
                self.add_attribute('add_colorbar',True)
            elif plot_type == '2d_imshow':
                # from pylab import cm
                # self.add_attribute('pcolor_cmap',cm.RdYlBu,True)
                from pyclaw.plotters import colormaps
                self.add_attribute('imshow_cmap',colormaps.yellow_red_blue)
                self.add_attribute('imshow_cmin',None)
                self.add_attribute('imshow_cmax',None)
                self.add_attribute('add_colorbar',True)
            elif plot_type == '2d_contour':
                self.add_attribute('contour_nlevels',20)
                self.add_attribute('contour_levels',None)
                self.add_attribute('contour_min',None)
                self.add_attribute('contour_max',None)
                self.add_attribute('contour_show',1)
                self.add_attribute('contour_color','k')
                self.add_attribute('contour_cmap',None)
                self.add_attribute('add_colorbar',False)
            elif plot_type == '2d_schlieren':
                from pyclaw.plotters import colormaps
                self.add_attribute('schlieren_cmap',colormaps.schlieren_grays)
                self.add_attribute('schlieren_cmin',None)
                self.add_attribute('schlieren_cmax',None)
                self.add_attribute('add_colorbar',False)
            elif plot_type == '2d_grid':
                self.add_attribute('max_density',None)
                self.gridlines_show = True
            elif plot_type == '2d_quiver':
                self.add_attribute('quiver_var_x',None)
                self.add_attribute('quiver_var_y',None)
                self.add_attribute('quiver_coarsening',1)
                self.add_attribute('quiver_key_show',False)
                self.add_attribute('quiver_key_label_x',0.15)
                self.add_attribute('quiver_key_label_y',0.95)
                self.add_attribute('quiver_key_units','')
                self.add_attribute('quiver_key_scale',None)
                self.add_attribute('quiver_key_kwargs',{})
            else:
                print '*** Warning 2d plot type %s not recognized' % plot_type
        elif ndim == 3:
            print '*** Warning- ClawPlotItem not yet set up for ndim = 3'
        else:
            print '*** Warning- Unrecognized plot_type in ClawPlotItem'

        self.params = {} # dictionary to hold optional parameters

    def getframe(self,frameno):
        """
        ClawPlotItem.getframe:
        Return an object of class Solution containing the solution
        for frame number frameno.

        If self.refresh_frames == True then this frame is read from the fort
        files, otherwise it is read from the fort files only if the
        the dictionary self.framesoln_dict has key frameno. If it does, the
        frame has previously been read and the dictionary value is returned.
        """
        plotdata = self._plotdata
        outdir = self.outdir
        framesoln = plotdata.getframe(frameno, outdir)
        return framesoln

    def getgauge(self,gauge):
        """
        ClawPlotItem.getgauge:
        Return an object of class GaugeSolution containing the solution
        for gauge number gaugeno.

        If self.refresh_gauges == True then this gauge is read from the
        fort.gauge file, otherwise it is read only if the
        the dictionary self.gaugesoln_dict has no key gaugeno. If it does, the
        gauge has previously been read and the dictionary value is returned.
        """
        plotdata = self._plotdata
        outdir = self.outdir
        gaugesoln = plotdata.getgauge(gauge, outdir)
        return gaugesoln
#-----------------------------------------------------------------------
# New classes and functions for dealing with data in setrun function.
class ClawInputData(Data):
    """
    Object that will be written out to claw.data.

    Holds all run-time parameters for a single-grid Clawpack run;
    defaults are filled in for ndim == 1 or ndim == 2.
    """

    def __init__(self, ndim):
        """Populate default parameter values for the given dimension."""
        super(ClawInputData,self).__init__()
        self.add_attribute('ndim',ndim)
        # Set default values:
        if ndim == 1:
            self.add_attribute('mx',100)
            self.add_attribute('nout',5)
            self.add_attribute('outstyle',1)
            self.add_attribute('tfinal',1.0)
            self.add_attribute('dt_initial',1.e-5)
            self.add_attribute('dt_max',1.e99)
            self.add_attribute('cfl_desired',0.9)
            self.add_attribute('cfl_max',1.0)
            self.add_attribute('max_steps',5000)
            self.add_attribute('dt_variable',1)
            self.add_attribute('order',2)
            self.add_attribute('order_trans',0)
            self.add_attribute('verbosity',0)
            self.add_attribute('src_split',0)
            self.add_attribute('mcapa',0)
            self.add_attribute('maux',0)
            self.add_attribute('meqn',1)
            self.add_attribute('mwaves',1)
            self.add_attribute('mthlim',[4])
            self.add_attribute('t0',0.)
            self.add_attribute('xlower',0.)
            self.add_attribute('xupper',1.)
            self.add_attribute('mbc',2)
            self.add_attribute('mthbc_xlower',1)
            self.add_attribute('mthbc_xupper',1)
            self.add_attribute('restart',0)
            self.add_attribute('N_restart',0)
        elif ndim == 2:
            self.add_attribute('mx',100)
            self.add_attribute('my',100)
            self.add_attribute('nout',5)
            self.add_attribute('outstyle',1)
            self.add_attribute('tfinal',1.0)
            self.add_attribute('dt_initial',1.e-5)
            self.add_attribute('dt_max',1.e99)
            self.add_attribute('cfl_desired',0.9)
            self.add_attribute('cfl_max',1.0)
            self.add_attribute('max_steps',5000)
            self.add_attribute('dt_variable',1)
            self.add_attribute('order',2)
            self.add_attribute('order_trans',2)
            self.add_attribute('verbosity',0)
            self.add_attribute('src_split',0)
            self.add_attribute('mcapa',0)
            self.add_attribute('maux',0)
            self.add_attribute('meqn',1)
            self.add_attribute('mwaves',1)
            self.add_attribute('mthlim',[4])
            self.add_attribute('t0',0.)
            self.add_attribute('xlower',0.)
            self.add_attribute('xupper',1.)
            self.add_attribute('ylower',0.)
            self.add_attribute('yupper',1.)
            self.add_attribute('mbc',2)
            self.add_attribute('mthbc_xlower',1)
            self.add_attribute('mthbc_xupper',1)
            self.add_attribute('mthbc_ylower',1)
            self.add_attribute('mthbc_yupper',1)
            self.add_attribute('restart',0)
            self.add_attribute('N_restart',0)
        else:
            print '*** Error: only ndim=1 or 2 supported so far ***'
            # NOTE(review): raise() raises an empty tuple and itself fails
            # with a TypeError; an explicit exception such as
            # raise ValueError(...) was probably intended.
            raise()

    def write(self):
        """Write out claw.data using make_clawdatafile."""
        print 'Creating data file claw.data for use with xclaw'
        make_clawdatafile(self)
class AmrclawInputData(Data):
    """
    Object that will be written out to amr2ez.data.

    Holds run-time and AMR parameters for an amrclaw run; defaults are
    filled in for ndim == 1 or ndim == 2.  1d runs are executed with the
    2d amrclaw code, hence the extra y-direction attributes in the 1d case.
    """

    def __init__(self, ndim):
        """Populate default parameter values for the given dimension."""
        super(AmrclawInputData,self).__init__()
        self.add_attribute('ndim',ndim)
        # Set default values:
        if ndim == 1:
            self.add_attribute('mx',100)
            self.add_attribute('nout',5)
            self.add_attribute('outstyle',1)
            self.add_attribute('tfinal',1.0)
            self.add_attribute('dt_initial',1.e-5)
            self.add_attribute('dt_max',1.e99)
            self.add_attribute('cfl_desired',0.9)
            self.add_attribute('cfl_max',1.0)
            self.add_attribute('max_steps',5000)
            self.add_attribute('dt_variable',1)
            self.add_attribute('order',2)
            self.add_attribute('order_trans',0)
            self.add_attribute('verbosity',0)
            self.add_attribute('src_split',0)
            self.add_attribute('mcapa',0)
            self.add_attribute('maux',0)
            self.add_attribute('meqn',1)
            self.add_attribute('mwaves',1)
            self.add_attribute('mthlim',[4])
            self.add_attribute('t0',0.)
            self.add_attribute('xlower',0.)
            self.add_attribute('xupper',1.)
            self.add_attribute('mbc',2)
            self.add_attribute('mthbc_xlower',1)
            self.add_attribute('mthbc_xupper',1)
            self.add_attribute('restart',0)
            self.add_attribute('N_restart',0)
            # attributes need only since AMR is done using 2d amrclaw:
            self.add_attribute('my',1)
            self.add_attribute('ylower',0.)
            self.add_attribute('yupper',1.)
            self.add_attribute('mthbc_ylower',1)
            self.add_attribute('mthbc_yupper',1)
            self.add_attribute('inraty',[1,1,1,1,1,1])
        elif ndim == 2:
            self.add_attribute('mx',100)
            self.add_attribute('my',100)
            self.add_attribute('nout',5)
            self.add_attribute('outstyle',1)
            self.add_attribute('tfinal',1.0)
            self.add_attribute('dt_initial',1.e-5)
            self.add_attribute('dt_max',1.e99)
            self.add_attribute('cfl_desired',0.9)
            self.add_attribute('cfl_max',1.0)
            self.add_attribute('max_steps',5000)
            self.add_attribute('dt_variable',1)
            self.add_attribute('order',2)
            self.add_attribute('order_trans',2)
            self.add_attribute('verbosity',0)
            self.add_attribute('src_split',0)
            self.add_attribute('mcapa',0)
            self.add_attribute('maux',0)
            self.add_attribute('meqn',1)
            self.add_attribute('mwaves',1)
            self.add_attribute('mthlim',[4])
            self.add_attribute('t0',0.)
            self.add_attribute('xlower',0.)
            self.add_attribute('xupper',1.)
            self.add_attribute('ylower',0.)
            self.add_attribute('yupper',1.)
            self.add_attribute('mbc',2)
            self.add_attribute('mthbc_xlower',1)
            self.add_attribute('mthbc_xupper',1)
            self.add_attribute('mthbc_ylower',1)
            self.add_attribute('mthbc_yupper',1)
            self.add_attribute('restart',0)
            self.add_attribute('N_restart',0)
            self.add_attribute('inraty',[1])
        if ndim <= 2:
            # AMR parameters:
            self.add_attribute('mxnest',-1)
            self.add_attribute('inratx',[1])
            self.add_attribute('inratt',[1])
            self.add_attribute('auxtype',[])
            self.add_attribute('restart',False)
            self.add_attribute('checkpt_iousr',1000)
            self.add_attribute('tol',-1.0)
            self.add_attribute('tolsp',0.05)
            self.add_attribute('kcheck',2)
            self.add_attribute('ibuff',3)
            self.add_attribute('cutoff',0.7)
            self.add_attribute('PRINT',False)
            self.add_attribute('NCAR',False)
            self.add_attribute('fortq',True)
            # debugging print flags written at the end of amr2ez.data:
            self.add_attribute('dprint',False)
            self.add_attribute('eprint',False)
            self.add_attribute('edebug',False)
            self.add_attribute('gprint',False)
            self.add_attribute('nprint',False)
            self.add_attribute('pprint',False)
            self.add_attribute('rprint',False)
            self.add_attribute('sprint',False)
            self.add_attribute('tprint',False)
            self.add_attribute('uprint',False)
        else:
            print '*** Error: only ndim=1 or 2 supported so far ***'
            # NOTE(review): raise() raises an empty tuple and itself fails
            # with a TypeError; an explicit exception was probably intended.
            raise()

    def write(self):
        """Write out amr2ez.data using make_amrclawdatafile."""
        print 'Creating data file amr2ez.data for use with xamr'
        make_amrclawdatafile(self)
def open_datafile(name, datasource='setrun.py'):
    """
    Open a data file and write a warning header.

    Warning header starts with '#' character. These lines are skipped if
    data file is opened using the library routine opendatafile.

    INPUT:
        name       - name of data file
        datasource - name of the script that generated the data, quoted
                     in the header (default 'setrun.py')
    OUTPUT:
        file - open file object, positioned just after the header
    """
    # str.ljust replaces the deprecated string.ljust (the `string` module
    # function was removed in Python 3; behavior is identical).
    source = datasource.ljust(25)
    file = open(name, 'w')
    file.write('########################################################\n')
    file.write('### DO NOT EDIT THIS FILE: GENERATED AUTOMATICALLY ####\n')
    file.write('### To modify data, edit %s ####\n' % source)
    file.write('### and then "make .data" ####\n')
    file.write('########################################################\n\n')
    return file
def data_write(file, dataobj, name=None, descr=''):
    """
    Write out value to data file, in the form
    value =: name descr
    Remove brackets and commas from lists, and replace booleans by T/F.
    Also convert numpy array to a list first.

    INPUTS
        file    - open file object to write to
        dataobj - object whose attribute `name` supplies the value
        name, normally a string defining the variable
              if name==None, write a blank line.
        descr, a short description to appear on the line
    """
    import string  # NOTE(review): string.ljust was removed in Python 3;
                   # str.ljust is the portable equivalent.
    if name is None:
        file.write('\n')
    else:
        try:
            value = getattr(dataobj, name)
        except:
            print "Variable missing: ",name
            print " from dataobj = ", dataobj
            raise
        # Convert value to an appropriate string repr
        import numpy
        if isinstance(value,numpy.ndarray):
            value = list(value)
        if isinstance(value,tuple) | isinstance(value,list):
            # Remove [], (), and ','
            string_value = repr(value)[1:-1]
            string_value = string_value.replace(',','')
        elif isinstance(value,bool):
            # Fortran logical flags are written as T / F
            if value:
                string_value = 'T'
            else:
                string_value = 'F'
        else:
            string_value = repr(value)
        # Pad the columns so the Fortran reader sees aligned fields.
        padded_value = string.ljust(string_value, 25)
        padded_name = string.ljust(name, 12)
        file.write('%s =: %s %s\n' % (padded_value, padded_name, descr))
def make_clawdatafile(clawdata):
    """
    Take the data specified in clawdata and write it to claw.data in the
    form required by the Fortran code lib/main.f95.

    The order of the data_write calls below must match the read order in
    the Fortran driver.
    """
    # open file and write a warning header:
    file = open_datafile('claw.data')
    ndim = clawdata.ndim
    data_write(file, clawdata, 'ndim', '(number of dimensions)')
    data_write(file, clawdata, 'mx', '(cells in x direction)')
    if ndim > 1:
        data_write(file, clawdata, 'my', '(cells in y direction)')
    if ndim == 3:
        data_write(file, clawdata, 'mz', '(cells in z direction)')
    data_write(file, clawdata, None) # writes blank line
    data_write(file, clawdata, 'nout', '(number of output times)')
    data_write(file, clawdata, 'outstyle', '(style of specifying output times)')
    if clawdata.outstyle == 1:
        data_write(file, clawdata, 'tfinal', '(final time)')
    elif clawdata.outstyle == 2:
        data_write(file, clawdata, 'tout', '(output times)')
    elif clawdata.outstyle == 3:
        data_write(file, clawdata, 'iout', '(output every iout steps)')
    else:
        print '*** Error: unrecognized outstyle'
        # NOTE(review): a bare `raise` has no active exception to re-raise
        # here, so it fails with a TypeError, and the `return` below is
        # unreachable; an explicit exception was probably intended.
        raise
        return
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'dt_initial', '(initial time step dt)')
    data_write(file, clawdata, 'dt_max', '(max allowable dt)')
    data_write(file, clawdata, 'cfl_max', '(max allowable Courant number)')
    data_write(file, clawdata, 'cfl_desired', '(desired Courant number)')
    data_write(file, clawdata, 'max_steps', '(max time steps per call to claw)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'dt_variable', '(1 for variable dt, 0 for fixed)')
    data_write(file, clawdata, 'order', '(1 or 2)')
    if ndim == 1:
        data_write(file, clawdata, 'order_trans', '(not used in 1d)')
    else:
        data_write(file, clawdata, 'order_trans', '(transverse order)')
    data_write(file, clawdata, 'verbosity', '(verbosity of output)')
    data_write(file, clawdata, 'src_split', '(source term splitting)')
    data_write(file, clawdata, 'mcapa', '(aux index for capacity fcn)')
    data_write(file, clawdata, 'maux', '(number of aux variables)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'meqn', '(number of equations)')
    data_write(file, clawdata, 'mwaves', '(number of waves)')
    data_write(file, clawdata, 'mthlim', '(limiter choice for each wave)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 't0', '(initial time)')
    data_write(file, clawdata, 'xlower', '(xlower)')
    data_write(file, clawdata, 'xupper', '(xupper)')
    if ndim > 1:
        data_write(file, clawdata, 'ylower', '(ylower)')
        data_write(file, clawdata, 'yupper', '(yupper)')
    if ndim == 3:
        data_write(file, clawdata, 'zlower', '(zlower)')
        data_write(file, clawdata, 'zupper', '(zupper)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'mbc', '(number of ghost cells)')
    data_write(file, clawdata, 'mthbc_xlower', '(type of BC at xlower)')
    data_write(file, clawdata, 'mthbc_xupper', '(type of BC at xupper)')
    if ndim > 1:
        data_write(file, clawdata, 'mthbc_ylower', '(type of BC at ylower)')
        data_write(file, clawdata, 'mthbc_yupper', '(type of BC at yupper)')
    if ndim == 3:
        data_write(file, clawdata, 'mthbc_zlower', '(type of BC at zlower)')
        data_write(file, clawdata, 'mthbc_zupper', '(type of BC at zupper)')
    file.close()
def make_amrclawdatafile(clawdata):
    """
    Take the data specified in clawdata and write it to claw.data in the
    form required by the Fortran code lib/main.f95.

    NOTE(review): despite the sentence above, this routine actually writes
    amr2ez.data -- confirm and update the description.  The order of the
    data_write calls must match the read order in the Fortran driver.
    """
    # open file and write a warning header:
    file = open_datafile('amr2ez.data')
    ndim = clawdata.ndim
    #data_write(file, clawdata, 'ndim', '(number of dimensions)')
    data_write(file, clawdata, 'mx', '(cells in x direction)')
    data_write(file, clawdata, 'my', '(cells in y direction)')
    if ndim == 3:
        data_write(file, clawdata, 'mz', '(cells in z direction)')
    data_write(file, clawdata, 'mxnest', '(max number of grid levels)')
    data_write(file, clawdata, 'inratx', '(refinement ratios)')
    data_write(file, clawdata, 'inraty', '(refinement ratios)')
    if ndim == 3:
        data_write(file, clawdata, 'inratz', '(refinement ratios)')
    data_write(file, clawdata, 'inratt', '(refinement ratios)')
    data_write(file, clawdata, None) # writes blank line
    data_write(file, clawdata, 'nout', '(number of output times)')
    data_write(file, clawdata, 'outstyle', '(style of specifying output times)')
    if clawdata.outstyle == 1:
        data_write(file, clawdata, 'tfinal', '(final time)')
    elif clawdata.outstyle == 2:
        data_write(file, clawdata, 'tout', '(output times)')
    elif clawdata.outstyle == 3:
        data_write(file, clawdata, 'iout', '(output every iout steps)')
    else:
        print '*** Error: unrecognized outstyle'
        # NOTE(review): bare `raise` with no active exception fails with a
        # TypeError, and the `return` below is unreachable.
        raise
        return
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'dt_initial', '(initial time step dt)')
    data_write(file, clawdata, 'dt_max', '(max allowable dt)')
    data_write(file, clawdata, 'cfl_max', '(max allowable Courant number)')
    data_write(file, clawdata, 'cfl_desired', '(desired Courant number)')
    data_write(file, clawdata, 'max_steps', '(max time steps per call to claw)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'dt_variable', '(1 for variable dt, 0 for fixed)')
    data_write(file, clawdata, 'order', '(1 or 2)')
    if ndim == 1:
        data_write(file, clawdata, 'order_trans', '(not used in 1d)')
    else:
        data_write(file, clawdata, 'order_trans', '(transverse order)')
    data_write(file, clawdata, 'verbosity', '(verbosity of output)')
    data_write(file, clawdata, 'src_split', '(source term splitting)')
    data_write(file, clawdata, 'mcapa', '(aux index for capacity fcn)')
    data_write(file, clawdata, 'maux', '(number of aux variables)')
    if len(clawdata.auxtype) != clawdata.maux:
        file.close()
        # Python 2-only raise syntax (kept unchanged in this revision).
        raise AttributeError, "require len(clawdata.auxtype) == clawdata.maux"
    for i in range(clawdata.maux):
        file.write("'%s'\n" % clawdata.auxtype[i])
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'meqn', '(number of equations)')
    data_write(file, clawdata, 'mwaves', '(number of waves)')
    data_write(file, clawdata, 'mthlim', '(limiter choice for each wave)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 't0', '(initial time)')
    data_write(file, clawdata, 'xlower', '(xlower)')
    data_write(file, clawdata, 'xupper', '(xupper)')
    data_write(file, clawdata, 'ylower', '(ylower)')
    data_write(file, clawdata, 'yupper', '(yupper)')
    if ndim == 3:
        data_write(file, clawdata, 'zlower', '(zlower)')
        data_write(file, clawdata, 'zupper', '(zupper)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'mbc', '(number of ghost cells)')
    data_write(file, clawdata, 'mthbc_xlower', '(type of BC at xlower)')
    data_write(file, clawdata, 'mthbc_xupper', '(type of BC at xupper)')
    data_write(file, clawdata, 'mthbc_ylower', '(type of BC at ylower)')
    data_write(file, clawdata, 'mthbc_yupper', '(type of BC at yupper)')
    if ndim == 3:
        data_write(file, clawdata, 'mthbc_zlower', '(type of BC at zlower)')
        data_write(file, clawdata, 'mthbc_zupper', '(type of BC at zupper)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'restart', '(1 to restart from a past run)')
    data_write(file, clawdata, 'checkpt_iousr', '(how often to checkpoint)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'tol', '(tolerance for Richardson extrap)')
    data_write(file, clawdata, 'tolsp', '(tolerance used in flag2refine)')
    data_write(file, clawdata, 'kcheck', '(how often to regrid)')
    data_write(file, clawdata, 'ibuff', '(buffer zone around flagged pts)')
    data_write(file, clawdata, 'cutoff', '(efficiency cutoff for grid gen.)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'PRINT', '(print to fort.amr)')
    data_write(file, clawdata, 'NCAR', '(obsolete!)')
    data_write(file, clawdata, 'fortq', '(Output to fort.q* files)')
    data_write(file, clawdata, None)
    data_write(file, clawdata, 'dprint', '(print domain flags)')
    data_write(file, clawdata, 'eprint', '(print err est flags)')
    data_write(file, clawdata, 'edebug', '(even more err est flags)')
    data_write(file, clawdata, 'gprint', '(grid bisection/clustering)')
    data_write(file, clawdata, 'nprint', '(proper nesting output)')
    data_write(file, clawdata, 'pprint', '(proj. of tagged points)')
    data_write(file, clawdata, 'rprint', '(print regridding summary)')
    data_write(file, clawdata, 'sprint', '(space/memory output)')
    data_write(file, clawdata, 'tprint', '(time step reporting each level)')
    data_write(file, clawdata, 'uprint', '(update/upbnd reporting)')
    file.close()
def make_userdatafile(userdata):
    """
    Create the data file using the parameters in userdata.

    The parameters are written in the same order they were specified
    using userdata.add_attribute, since they are presumably read back
    positionally by a Fortran routine such as setprob.f95.
    """
    # open_datafile also writes the auto-generated warning header.
    outfile = open_datafile(userdata._UserData__fname)
    descriptions = userdata._UserData__descr
    # write all the parameters, in declaration order:
    for param in userdata.attributes:
        data_write(outfile, userdata, param, descriptions[param])
    outfile.close()
class GaugeSolution(Data):
    """
    Holds gaugeno, t, q, x, y, t1, t2 for a single gauge.
    """

    def __init__(self):
        # No data files are read at construction time; the Data base
        # class is initialized with the attribute names only.
        gauge_attrs = ['gaugeno', 'level', 't', 'q', 'x', 'y', 't1', 't2']
        super(GaugeSolution, self).__init__([], gauge_attrs)
        # default values of attributes: none (Data defaults are kept).
# ============================================================================
# Subclass ClawOtherFigure containing data for plotting a figure
# ============================================================================
class ClawOtherFigure(Data):
    """
    Data subclass containing plot data needed to plot a single figure.
    For figures that are not produced each frame.
    """

    def __init__(self, name, plotdata):
        """
        Initialize a ClawOtherFigure object

        name     : identifier for this figure
        plotdata : parent ClawPlotData object
        """
        super(ClawOtherFigure, self).__init__(
            attributes=['name', '_plotdata', 'fname', 'makefig'])
        self._plotdata = plotdata   # parent ClawPlotData object
        self.name = name
        self.fname = None           # name of png file
        self.makefig = None         # function invoked to create figure
|
clawpack/clawpack-4.x
|
python/pyclaw/plotters/data.py
|
Python
|
bsd-3-clause
| 58,418
|
[
"NetCDF"
] |
9b7af73544f3cf30ee33ad01775decac90ca35ed24a8e4363b8b4833b6dfc2c7
|
from .tractography import Tractography
from .trackvis import tractography_from_trackvis_file, tractography_to_trackvis_file
from warnings import warn
import numpy
__all__ = [
'Tractography',
'tractography_from_trackvis_file', 'tractography_to_trackvis_file',
'tractography_from_files',
'tractography_from_file', 'tractography_to_file',
]
try:
__all__ += [
'tractography_from_vtk_files', 'tractography_to_vtk_file',
'vtkPolyData_to_tracts', 'tracts_to_vtkPolyData'
]
from .vtkInterface import (
tractography_from_vtk_files, tractography_to_vtk_file,
vtkPolyData_to_tracts, tracts_to_vtkPolyData
)
except ImportError:
warn(
'VTK support not installed in this python distribution, '
'VTK files will not be read or written'
)
def tractography_from_files(filenames):
    """Load one or more tractography files and merge them into a single object.

    A bare string is treated as a one-element list.  The first file
    provides the base Tractography; every subsequent file's tracts and
    per-tract data are appended to it.
    """
    if isinstance(filenames, str):
        filenames = [filenames]
    merged = tractography_from_file(filenames[0])
    for extra_name in filenames[1:]:
        extra = tractography_from_file(extra_name)
        merged.append(extra.tracts(), extra.tracts_data())
    return merged
def tractography_from_file(filename):
    """Read a tractography from a .trk, .vtk or .vtp file, by suffix."""
    if filename.endswith('trk'):
        return tractography_from_trackvis_file(filename)
    if filename.endswith(('vtk', 'vtp')):
        # VTK readers are only exported when the vtk package imported.
        if 'tractography_from_vtk_files' not in __all__:
            raise IOError("No VTK support installed, VTK files could not be read")
        return tractography_from_vtk_files(filename)
    raise IOError("File format not supported")
def tractography_to_file(filename, tractography, **kwargs):
    """Write a tractography to a .trk, .vtk or .vtp file, by suffix.

    For trackvis output, the affine and image_dimensions header fields
    are taken from the tractography object when not supplied in kwargs,
    falling back to identity / unit dimensions with a warning.
    """
    if filename.endswith('trk'):
        if 'affine' not in kwargs:
            if hasattr(tractography, 'affine'):
                kwargs['affine'] = tractography.affine
            else:
                warn('Setting affine of trk file to the identity')
                kwargs['affine'] = numpy.eye(4)
        if 'image_dimensions' not in kwargs:
            if hasattr(tractography, 'image_dims'):
                kwargs['image_dimensions'] = tractography.image_dims
            else:
                warn('Setting image_dimensions of trk file to: 1 1 1')
                kwargs['image_dimensions'] = numpy.ones(3)
        return tractography_to_trackvis_file(filename, tractography, **kwargs)
    if filename.endswith(('vtk', 'vtp')):
        if 'tractography_from_vtk_files' not in __all__:
            raise IOError("No VTK support installed, VTK files could not be read")
        return tractography_to_vtk_file(filename, tractography, **kwargs)
    raise IOError("File format not supported")
|
oesteban/tract_querier
|
tract_querier/tractography/__init__.py
|
Python
|
bsd-3-clause
| 2,715
|
[
"VTK"
] |
74f00d8a7d957fabca90b239d83f59b5c02c2f25515f8ff7d22b31c150ba7158
|
# cmo utility module: loads tool/genome resource definitions from a JSON
# config at import time and exposes shell-command helpers for the wrappers.
from collections import defaultdict
import json, subprocess, sys, re, magic, csv, os, logging
#STRAWMAN FIXME
#THIS WOULD BE A PROGRAMMATICALLY INGESTED JSON ON MODULE LOAD IN REAL LIFE
#DONT HATE THIS PART
#programs = defaultdict(dict)
#programs['bwa']={"default":"/opt/common/CentOS_6/bwa/bwa-0.7.12/bwa",
#    "0.7.12":"/opt/common/CentOS_6/bwa/bwa-0.7.12/bwa",
#    "0.7.10":"/opt/common/CentOS_6/bwa/bwa-0.7.10/bwa"
#    }
#programs['samtools']={"default":"/opt/common/CentOS_6/samtools/samtools-0.1.19/samtools",
#    "0.1.19":"/opt/common/CentOS_6/samtools/samtools-0.1.19/samtools"}
#genomes = defaultdict(dict)
#genomes['hg19']={"fasta":"/ifs/depot/assemblies/H.sapiens/hg19/hg19.fasta"}
# The resource config path can be overridden with $CMO_RESOURCE_CONFIG.
resource_file = os.getenv('CMO_RESOURCE_CONFIG', "/opt/common/CentOS_6-dev/cmo/cmo_resources.json")
json_config = json.load(open(resource_file))
# Sections of the resource config exposed as module-level lookup tables.
programs = json_config['programs']
genomes = json_config['genomes']
chr1_fingerprints = json_config['chr1_fingerprints']
keys = json_config['keys']
targets = json_config['targets']
config = json_config['config']
# Module-wide 'cmo' logger writing to stdout at INFO level.
FORMAT = '%(asctime)-15s %(funcName)-8s %(levelname)s %(message)s'
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter(FORMAT))
out_hdlr.setLevel(logging.INFO)
# NOTE(review): 'd' appears unused anywhere in this module -- candidate for removal.
d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
logger = logging.getLogger('cmo')
logger.addHandler(out_hdlr)
logger.setLevel(logging.INFO)
# For files under genomes, return path to the copy at /dev/shm if found
for build in genomes.keys():
    for file in genomes[build].keys():
        path = genomes[build][file]
        # 'unicode' check: this module targets Python 2 (see except-comma syntax below).
        if type(path) == unicode and os.path.isfile(path):
            fastpath = '/dev/shm' + path
            if os.path.isfile(fastpath):
                genomes[build][file] = fastpath
###charris FIXME
##some lsf specific code for our immediate needs.
# Best-effort: log the LSF job id when running under an LSF batch job.
try:
    if os.getenv("LSB_JOBID"):
        logger.info("LSFJOBID: %s"% os.getenv('LSB_JOBID'))
except:
    pass
def get_logger():
    # Accessor for the module-wide 'cmo' logger configured at import time.
    return logger
#GLOBAL WHAT SHUT UP
def find_chromosomes(genome_string, extended=False):
    """Return the chromosome names for a genome build from its fasta .fai index.

    :param genome_string: genome build key in cmo_resources.json (e.g. 'hg19')
    :param extended: when True return every contig; otherwise only the first
        25 entries (presumably chr1-22, X, Y, M -- TODO confirm the .fai
        ordering matches that convention)
    """
    try:
        fasta = genomes[genome_string]['fasta']
    except KeyError:
        # Narrowed from a bare except: only a missing build/fasta key is expected here.
        logger.critical("Genome %s does not have a fasta entry in cmo_resources.json, unable to find chromosome list" % genome_string)
        sys.exit(1)
    fai = fasta + ".fai"
    chrom_range = list()
    # .fai is tab separated; column 0 is the contig name. Close the handle
    # deterministically instead of leaking it.
    with open(fai, "rb") as fai_fh:
        for row in csv.reader(fai_fh, delimiter="\t"):
            chrom_range.append(row[0])
    if(extended):
        return chrom_range
    else:
        return chrom_range[0:25]
def samtools_index(bam):
    """Index a BAM file with the configured default samtools binary.

    Returns the exit code reported by call_cmd().
    """
    samtools_bin = programs['samtools']['default']
    return call_cmd(" ".join([samtools_bin, "index", bam]))
def infer_fasta_from_bam(bam_file):
    # Guess the reference genome of a BAM by fingerprinting chromosome 1:
    # read the @SQ name/length pairs from the header and match against the
    # chr1_fingerprints table. Returns (build, fasta_path) on a match,
    # (None, None) otherwise.
    get_chr1_cmd= [programs['samtools']['default'], "view -H", bam_file, "| fgrep \"@SQ\" | awk '{print $2,$3}'"]
    chr_tags = subprocess.Popen(" ".join(get_chr1_cmd), shell=True, stdout=subprocess.PIPE, stderr=open("/dev/null")).communicate()[0]
    chr_name = None
    length = None
    for line in chr_tags.split("\n"):
        if not line:
            break
        (this_chr, this_length) = line.split(" ")
        # Look for the @SQ record named '1' or 'chr1'.
        if re.search("SN:(chr)?1$", this_chr)!=None:
            chr_name = this_chr[3:]   # strip the 'SN:' prefix
            length = this_length[3:]  # strip the 'LN:' prefix
    if chr_name == None:
        #we didn't find a match
        return(None, None)
    for candidate in chr1_fingerprints:
        if chr1_fingerprints[candidate]['name']==chr_name and chr1_fingerprints[candidate]['length']==int(length):
            logger.info("Inferred genome to be %s" % candidate)
            return (candidate, genomes[candidate]['fasta'])
    logger.critical("Chromosome 1 name %s, length %s, doesn't match any standard refs?" % (chr_name, length))
    return (None, None)
def infer_sample_from_bam(bam_file):
    """Return the single SM: sample name from a BAM's @RG header lines.

    Returns None (after logging a critical message) when there are no
    SM: tags or when the read groups carry more than one sample name.
    """
    get_rg_cmd= [programs['samtools']['default'], "view -H", bam_file, "| grep \"^@RG\" "]
    rg_lines = subprocess.Popen(" ".join(get_rg_cmd), shell=True, stdout=subprocess.PIPE, stderr=open("/dev/null")).communicate()[0]
    sample_dict = {}
    for rg in rg_lines.splitlines():
        tags = rg.split("\t")
        for tag in tags:
            if tag[0:2]=="SM":
                sample_dict[tag[3:]]=1  # strip the 'SM:' prefix
    if len(sample_dict.keys()) > 1:
        # BUG FIX: the old format string used a bare '%' (no conversion
        # character), which raised ValueError instead of logging.
        logger.critical("Mixed sample tags in Read Group header for %s, can't infer a single sample name from this bam naively" % bam_file)
        return None
    elif len(sample_dict.keys()) == 1:
        # list() works on both Python 2 and 3 dict views.
        sample = list(sample_dict)[0]
        logger.info("Found one sample key for this bam: %s" % sample)
        return sample
    else:
        #we didnt find any RG with SM: at all :(
        logger.critical("No @RG lines with SM: tags found in %s, can't infer sample" % bam_file)
        return None
def filesafe_string(string):
    """Reduce a string to characters that are safe in file names.

    Keeps alphanumerics plus '.' and '_', drops everything else, then
    trims trailing whitespace (a no-op after filtering, kept for parity).
    """
    safe_extras = ('.', '_')
    kept = [ch for ch in string if ch.isalnum() or ch in safe_extras]
    return "".join(kept).rstrip()
def call_cmd(cmd, shell=True, stderr=None, stdout=None, stdin=None):
    """Log and run a shell command through /bin/bash, exiting on failure.

    :param cmd: command string (passed to the shell when shell=True)
    :param stderr/stdout/stdin: file-like objects or path strings; path
        strings are opened here
    :returns: the command's exit code (always 0 -- any failure calls sys.exit)
    """
    # Accept either open file objects or path strings for the three streams.
    if stdout and not hasattr(stdout, "write"):
        stdout=open(stdout, "w")
    if stderr and not hasattr(stderr, "write"):
        stderr=open(stderr, "w")
    if stdin and not hasattr(stdin, "read"):
        stdin=open(stdin, "r")
    try:
        logger.info("EXECUTING: %s" % cmd)
        return_code = subprocess.check_call(cmd, shell=shell, stderr=stderr, stdout=stdout, stdin=stdin, executable="/bin/bash")
        return return_code
    # 'except X as e' is valid on Python 2.6+ and required on Python 3;
    # the old comma form was Python-2-only.
    except subprocess.CalledProcessError as e:
        logger.critical( "Non Zero Exit Code %s from %s" % (e.returncode, cmd))
        logger.critical("Bailing out!")
        sys.exit(1)
    except IOError as e:
        logger.critical(e)
        logger.critical("I/O error({0}): {1}".format(e.errno, e.strerror))
        sys.exit(1)
# Command-line flags shared by the wrapper scripts for redirecting output.
logging_options = [
    ("--stderr", "log stderr to file"),
    ("--stdout", "log stdout to file"),
]
def add_logging_options(parser):
    # Register the shared logging flags on an argparse parser.
    for (arg, help) in logging_options:
        parser.add_argument(arg, default=None, help=help)
def remove_logging_options_from_dict(dict):
    # Strip the logging flags from a parsed-args dict before handing the
    # remaining options to a tool.
    # NOTE(review): the parameter shadows the builtin 'dict'; renaming it
    # would break keyword callers, so it is left as-is.
    for (arg, help) in logging_options:
        key = arg.replace("--","")
        if key in dict:
            del dict[key]
# Default tool locations resolved once from the resource config.
TABIX_LOCATION = programs['tabix']['default']
BGZIP_LOCATION = programs['bgzip']['default']
# bedtools entry points at the install dir; sortBed lives inside it.
SORTBED_LOCATION = os.path.join(programs['bedtools']['default'],'sortBed')
BCFTOOLS_LOCATION = programs['bcftools']['default']
def sort_vcf(vcf):
    """Coordinate-sort a VCF in place with sortBed, preserving the header.

    Writes to <name>.sorted.vcf and then moves it over the input file.
    """
    outfile = vcf.replace('.vcf', '.sorted.vcf')
    cmd = [SORTBED_LOCATION, '-i', vcf, '-header']
    try:
        # 'with' closes the output handle even on failure (the old code
        # leaked it); 'as'-less except also drops the unused binding and
        # the Python-2-only comma syntax.
        with open(outfile, 'w') as out_fh:
            subprocess.check_call(cmd, stdout=out_fh)
    except subprocess.CalledProcessError:
        logger.critical("Non-zero exit code from sortBed! Bailing out.")
        sys.exit(1)
    cmd = ['mv', outfile, vcf]
    subprocess.call(cmd)
def bgzip(vcf):
    """Compress a VCF with bgzip and return the path of the .gz file.

    Files that are already gzipped (by extension) are returned unchanged.
    """
    # BUG FIX: re.search('.gz', vcf) matched '.gz' as "any char + gz"
    # anywhere in the name, so paths like 'x.gz.vcf' or 'a_gz.vcf' were
    # wrongly returned uncompressed. Only a trailing '.gz' counts.
    if vcf.endswith('.gz'):
        return vcf
    outfile = '%s.gz'%(vcf)
    cmd = [BGZIP_LOCATION, '-c', vcf]
    logger.debug('BGZIP COMMAND: %s'%(' '.join(cmd)))
    # Close the output handle deterministically instead of leaking it.
    with open(outfile, 'w') as out_fh:
        subprocess.call(cmd, stdout=out_fh)
    return outfile
def tabix_file(vcf_file):
    ''' index a vcf file with tabix for random access'''
    # Refuse to index files that are not bgzipped (tabix requires block
    # gzip); libmagic sniffs the actual content, not the file name.
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
        if(m.id_filename(vcf_file).find('gz') == -1):
            logger.critical('VCF File needs to be bgzipped for tabix random access. tabix-0.26/bgzip should be compiled for use')
            sys.exit(1)
    cmd = [TABIX_LOCATION, '-p' , 'vcf', vcf_file]
    logger.debug('Tabix command: %s'%(' '.join(cmd)))
    try:
        rv = subprocess.check_call(cmd)
    # Python-2-only except syntax, kept byte-identical in this doc pass.
    except subprocess.CalledProcessError, e:
        logger.critical('Non-zero exit code from Tabix! Bailing out.')
        sys.exit(1)
def fix_contig_tag_in_vcf(vcf_file):
    #OK for small files only
    # Rewrites '##contig=<ID=...>' header lines to append a length=0
    # attribute (some downstream tools require 'length'); the whole VCF is
    # held in memory, hence the small-files caveat above.
    process_one = subprocess.Popen([BCFTOOLS_LOCATION, 'view', '%s'%(vcf_file)], stdout=subprocess.PIPE)
    vcf = re.sub(r'(?P<id>##contig=<ID=[^>]+)', r'\1,length=0', process_one.communicate()[0])
    process_two = subprocess.Popen([BGZIP_LOCATION, '-c'], stdin=subprocess.PIPE, stdout=open(vcf_file,'w'))
    process_two.communicate(input=vcf)
def fix_contig_tag_in_vcf_by_line(vcf_file):
    # Streaming variant of fix_contig_tag_in_vcf: pipes bcftools output
    # line-by-line through bgzip into 'fixed.vcf', then moves the result
    # over the input file.
    process_one = subprocess.Popen([BCFTOOLS_LOCATION, 'view', '%s'%(vcf_file)], stdout=subprocess.PIPE)
    process_two = subprocess.Popen([BGZIP_LOCATION, '-c'], stdin=subprocess.PIPE, stdout=open('fixed.vcf','w'))
    with process_one.stdout as p:
        for line in iter(p.readline, ''):
            line = re.sub(r'(?P<id>##contig=<ID=[^>]+)', r'\1,length=0', line)
            process_two.stdin.write('%s\n'%(line))
    process_two.stdin.close()
    process_two.wait()
    cmd = ['mv', 'fixed.vcf', vcf_file]
    subprocess.call(cmd)
def normalize_vcf(vcf_file, ref_fasta, version="default", method='bcf'):
    """Normalize/left-align a VCF with vt or bcftools norm.

    The input is bgzipped and tabix-indexed first; on success the
    temporaries are removed and the path of the normalized .vcf.gz is
    returned.

    :param vcf_file: input VCF path (name must contain '.vcf')
    :param ref_fasta: reference fasta the VCF was called against
    :param version: program version key in the resource config
    :param method: 'vt' or 'bcf' (bcftools norm, the default)
    """
    output_vcf = vcf_file.replace('.vcf', '.norm.vcf.gz')
    #sort_vcf(vcf_file)
    vcf_gz_file = bgzip(vcf_file)
    tabix_file(vcf_gz_file)
    cmd = ''
    if method == 'vt':
        cmd = [programs['vt'][version], 'normalize', '-r', ref_fasta, vcf_gz_file, '-o', output_vcf, '-q', '-n']
        logger.debug('VT Command: %s'%(' '.join(cmd)))
    elif method == 'bcf':
        cmd = [programs['bcftools'][version], 'norm', '--check-ref', 's', '--fasta-ref', ref_fasta, '--multiallelics', '+any', '--output-type', 'z', '--output', output_vcf, vcf_gz_file]
        logger.debug('bcftools norm Command: %s'%(' '.join(cmd)))
    try:
        subprocess.check_call(cmd)
        #fix_contig_tag_in_vcf_by_line(output_vcf)
        #fix_contig_tag_in_vcf(output_vcf)
    # Modernized from the Python-2-only 'except X, e' form; the binding
    # was unused anyway.
    except subprocess.CalledProcessError:
        logger.critical("Non-zero exit code from normalization! Bailing out.")
        sys.exit(1)
    os.unlink(vcf_gz_file)
    os.unlink('%s.tbi'%(vcf_gz_file))
    return output_vcf
|
mskcc/cmo
|
cmo/util.py
|
Python
|
gpl-2.0
| 9,809
|
[
"BWA"
] |
35a047cc16a5386d515f44b0f02bca38bf089df604bea4363d3165fecb8f5431
|
"""
Send an email to settings.FEED_APPROVERS with the feeds that need to
be manually approved.
"""
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.management import BaseCommand
from django.template import Context, Template
from ...models import PENDING_FEED, Feed
class Command(BaseCommand):
    """Email the feed-approvers group the list of feeds pending approval.

    Does nothing (beyond an optional notice) when no feeds are pending.
    """

    def handle(self, **kwargs):
        # Django passes verbosity as a string; default to 1 when absent/bad.
        try:
            verbosity = int(kwargs['verbosity'])
        except (KeyError, TypeError, ValueError):
            verbosity = 1
        feeds = Feed.objects.filter(approval_status=PENDING_FEED)
        to_email = [x.email for x in User.objects.filter(groups__name=settings.FEED_APPROVERS_GROUP_NAME)]
        # Idiomatic emptiness check (was: len(feeds) == 0).
        if not feeds:
            if verbosity >= 1:
                self.stdout.write("There are no pending feeds. Skipping the email.")
            return
        # Plain-text body rendered through the Django template engine so we
        # can regroup feeds by type.
        email = """The following feeds are pending approval:
{% regroup feeds by feed_type as feed_grouping %}{% for group in feed_grouping %}
{{ group.grouper }} {% for feed in group.list %}
- {{ feed.title }} ( {{ feed.feed_url }} ) {% endfor %}
{% endfor %}
To approve them, visit: {% url 'admin:aggregator_feed_changelist' %}
"""
        message = Template(email).render(Context({'feeds': feeds}))
        if verbosity >= 2:
            self.stdout.write("Pending approval email:\n")
            self.stdout.write(message)
        mail.send_mail("django community feeds pending approval", message,
                       'nobody@djangoproject.com', to_email,
                       fail_silently=False)
        if verbosity >= 1:
            self.stdout.write("Sent pending approval email to: %s" % (', '.join(to_email)))
|
django/djangoproject.com
|
aggregator/management/commands/send_pending_approval_email.py
|
Python
|
bsd-3-clause
| 1,711
|
[
"VisIt"
] |
59c523c7c67b34e36d82160e8c35124c5c1e49ba483c3f3ae2d3f67e3ef8e1c5
|
"""
Copyright (c) 2012-2013, Nicolas Kuttler.
All rights reserved.
License: BSD, see LICENSE for details
Source and development at https://github.com/piwik/piwik-python-api
"""
import sys
import datetime
from hashlib import md5
import logging
import os
import random
try:
import json
except ImportError:
import simplejson as json
try:
from urllib.request import Request, urlopen
from urllib.parse import urlencode, urlparse, quote
except ImportError:
from urllib2 import Request, urlopen
from urllib import urlencode, quote
from urlparse import urlparse
from .exceptions import ConfigurationError
from .exceptions import InvalidParameter
class PiwikTracker(object):
    """
    The Piwik tracker class

    Builds query strings for the Piwik HTTP tracking API from a
    Django-like request object and submits them via _send_request().
    """
    # Piwik API version
    VERSION = 1
    #: Length of the visitor ID
    LENGTH_VISITOR_ID = 16
    #: List of plugins Piwik knows (long name -> tracking query-var key)
    KNOWN_PLUGINS = {
        'flash': 'fla',
        'java': 'java',
        'director': 'dir',
        'quick_time': 'qt',
        'real_player': 'realp',
        'pdf': 'pdf',
        'windows_media': 'wma',
        'gears': 'gears',
        'silverlight': 'ag',
    }
    # Message template used to flag methods ported from the PHP client
    # that are untested in this Python port.
    UNSUPPORTED_WARNING = "%s: The code that's just running is untested and " \
        "probably doesn't work as expected anyway."

    def __init__(self, id_site, request):
        """
        :param id_site: Site ID
        :type id_site: int
        :param request: Request
        :type request: A Django-like request object (must provide .META
            and .is_secure())
        :rtype: None
        """
        random.seed()
        self.request = request
        # Host/script/query string default to what the request reports;
        # the _set_* helpers below can override them (used by unit tests).
        self.host = self.request.META.get('SERVER_NAME', '')
        self.script = self.request.META.get('PATH_INFO', '')
        self.query_string = self.request.META.get('QUERY_STRING', '')
        self.id_site = id_site
        self.api_url = ''
        self.request_cookie = ''
        # Several attributes use False as a "not set" sentinel.
        self.ip = False
        self.token_auth = False
        self.__set_request_parameters()
        self.forced_datetime = False
        self.set_local_time(self._get_timestamp())
        self.page_url = self.__get_current_url()
        self.cookie_support = True
        self.has_cookies = False
        self.width = False
        self.height = False
        self.visitor_id = self.get_random_visitor_id()
        self.forced_visitor_id = False
        self.debug_append_url = False
        self.page_custom_var = {}
        self.visitor_custom_var = {}
        self.plugins = {}
        self.attribution_info = {}

    def __set_request_parameters(self):
        """
        Copy user agent, referer and accept-language from the request
        headers onto the tracker.
        :rtype: None
        """
        self.user_agent = self.request.META.get('HTTP_USER_AGENT', '')
        self.referer = self.request.META.get('HTTP_REFERER', '')
        #self.ip = self.request.META.get('REMOTE_ADDR')
        self.accept_language = self.request.META.get('HTTP_ACCEPT_LANGUAGE',
                                                     '')
def set_local_time(self, datetime):
"""
Set the time
:param datetime: Time
:type datetime: datetime.datetime object
:rtype: None
"""
self.local_hour = datetime.hour
self.local_minute = datetime.minute
self.local_second = datetime.second
    def set_token_auth(self, token_auth):
        """
        Set the auth token for the request. The token can be viewed in the
        user management section of your Piwik install.
        Required for set_ip() and set_force_visit_date_time().
        :param token_auth: Auth token
        :type token_auth: str
        :rtype: None
        """
        self.token_auth = token_auth
    def set_api_url(self, api_url):
        """
        Set which Piwik API URL to use
        :param api_url: API URL
        :type api_url: str
        :rtype: None
        """
        self.api_url = api_url
    def set_ip(self, ip):
        """
        Set the IP to be tracked. You probably want to use this as the
        request comes from your own server.
        Requires setting the auth token.
        :param ip: IP
        :type ip: str
        :rtype: None
        """
        self.ip = ip
    def set_browser_has_cookies(self):
        """
        Call this if the browser supports cookies
        :rtype: None
        """
        self.has_cookies = True
    def set_browser_language(self, language):
        """
        Set the browser language. Piwik uses this to guess the visitor's
        origin when GeoIP is not enabled
        :param language: Accept-Language
        :type language: str
        :rtype: None
        """
        self.accept_language = language
    def set_user_agent(self, user_agent):
        """
        Set the user agent. By default the original request's UA is used.
        :param user_agent: User agent
        :type user_agent: str
        :rtype: None
        """
        self.user_agent = user_agent
def set_resolution(self, width, height):
"""
Set the visitor's screen width and height
:param width: Screen width
:type width: int or str
:param height: Screen height
:type height: int or str
:rtype: None
"""
self.width = width
self.height = height
def set_visitor_id(self, visitor_id):
"""
:param visitor_id: Visitor I
:type visitor_id: str
:raises: InvalidParameter if the visitor_id has an incorrect length
:rtype: None
"""
if len(visitor_id) != self.LENGTH_VISITOR_ID:
raise InvalidParameter("set_visitor_id() expects a visitor ID of "
"length %s" % self.LENGTH_VISITOR_ID)
self.forced_visitor_id = visitor_id
    def set_debug_string_append(self, string):
        """
        Append an arbitrary string to every tracking request URL (for
        debugging only).
        :param string: str to append
        :type string: str
        :rtype: None
        """
        self.debug_append_url = string
    def set_url_referer(self, referer):
        """
        Set the referer URL
        :param referer: Referer
        :type referer: str
        :rtype: None
        """
        self.referer = referer
    def set_url(self, url):
        """
        Set URL being tracked
        :param url: URL
        :type url: str
        """
        self.page_url = url
def set_attribution_info(self, json_encoded):
"""
**NOT SUPPORTED**
Set the attribution info for the visit, so that subsequent goal
conversions are properly attributed to the right referer, timestamp,
campaign name and keyword.
This must be a JSON encoded string that you would normally fetch from
the Javascript API, see function getAttributionInfo() in
http://dev.piwik.org/trac/browser/trunk/js/piwik.js
:param json_encoded: JSON encoded list containing attribution info
:type json_encoded: string
:raises: InvalidParameter if the json_encoded data is incorrect
:rtype: none
"""
logging.warn(self.UNSUPPORTED_WARNING % 'set_attribution_info()')
decoded = json.loads(json_encoded)
if type(decoded) != type(list()):
raise InvalidParameter("set_attribution_info() is expecting a "
"JSON encoded string, %s given" %
json_encoded)
if len(decoded) != 4:
raise InvalidParameter("set_attribution_info() is expecting a "
"JSON encoded string, that contains a list "
"with four items, %s given" % json_encoded)
self.attribution_info = decoded
    def set_force_visit_date_time(self, datetime):
        """
        Override the server date and time for the tracking request.
        By default Piwik tracks requests for the "current" datetime, but
        this method allows you to track visits in the past. Time are in
        UTC.
        Requires setting the auth token.
        :param datetime: datetime
        :type datetime: datetime.datetime object
        :rtype: None
        """
        self.forced_datetime = datetime
    def __set_request_cookie(self, cookie):
        """
        Set the request cookie, for testing purposes
        :param cookie: Request cookie
        :type cookie: str
        :rtype: None
        """
        self.request_cookie = cookie
    def _set_host(self, host):
        """
        Used for unit tests; overriding any URL component rebuilds page_url.
        :param host: Hostname
        :type host: str
        :rtype: None
        """
        self.host = host
        self.page_url = self.__get_current_url()
    def _set_query_string(self, query_string):
        """
        Used for unit tests
        :param query_string: Query string
        :type query_string: str
        :rtype: None
        """
        self.query_string = query_string
        self.page_url = self.__get_current_url()
    def _set_script(self, script):
        """
        Used for unit tests
        :param script: Script name
        :type script: str
        :rtype: None
        """
        self.script = script
        self.page_url = self.__get_current_url()
    def __get_current_scheme(self):
        """
        Return either http or https
        :rtype: str
        """
        # django-specific: relies on the request object's is_secure()
        if self.request.is_secure():
            scheme = 'https'
        else:
            scheme = 'http'
        return scheme
    def __get_current_host(self):
        """
        Return the host component of the tracked URL.
        :rtype: str
        """
        return self.host
    def __get_current_script_name(self):
        """
        Return the path component of the tracked URL.
        :rtype: str
        """
        return self.script
    def __get_current_query_string(self):
        """
        Return the query string of the tracked URL (without the '?').
        :rtype: str
        """
        return self.query_string
def __get_current_url(self):
"""
Returns the URL of the page the visitor is on.
:rtype: str
"""
url = self.__get_current_scheme() + '://'
url += self.__get_current_host()
url += self.__get_current_script_name()
if self.__get_current_query_string():
url += '?'
url += self.__get_current_query_string()
return url
def _get_timestamp(self):
"""
Returns the timestamp for the request
Defaults to current datetime but can be set through
set_force_visit_date_time().
:rtype: datetime.datetime object
"""
if self.forced_datetime:
r = self.forced_datetime
else:
r = datetime.datetime.now()
return r
    def _get_request(self, id_site):
        """
        This oddly named method returns the query var string.
        (Name kept for parity with the PHP client's getRequest().)
        :param id_site: Site ID
        :type id_site: int
        :rtype: str
        """
        # Mandatory tracking vars; 'rand' is a cache buster.
        query_vars = {
            'idsite': id_site,
            'rec': 1,
            'apiv': self.VERSION,
            'rand': random.randint(0, 99999),
            'url': self.page_url,
            'urlref': self.referer,
            'id': self.visitor_id,
        }
        # Optional vars are only added when explicitly set (False = unset).
        if self.ip:
            query_vars['cip'] = self.ip
        if self.token_auth:
            query_vars['token_auth'] = self.token_auth
        if self.has_cookies:
            query_vars['cookie'] = 1
        if self.width and self.height:
            query_vars['res'] = '%dx%d' % (self.width, self.height)
        if self.forced_visitor_id:
            query_vars['cid'] = self.forced_visitor_id
        if self.page_custom_var:
            query_vars['cvar'] = json.dumps(self.page_custom_var)
        if self.visitor_custom_var:
            query_vars['_cvar'] = json.dumps(self.visitor_custom_var)
        if len(self.plugins):
            for plugin, version in self.plugins.items():
                query_vars[plugin] = version
        if len(self.attribution_info):
            # Fixed list order: campaign name, keyword, referral timestamp,
            # referral URL.
            for i, var in {
                0: '_rcn',
                1: '_rck',
                2: '_refts',
                3: '_ref',
            }.items():
                query_vars[var] = quote(self.attribution_info[i])
        url = urlencode(query_vars)
        if self.debug_append_url:
            url += self.debug_append_url
        return url
    def __get_url_track_page_view(self, document_title=''):
        """
        Returns the URL to piwik.php with all parameters set to track the
        pageview
        :param document_title: The title of the page the user is on
        :type document_title: str
        :rtype: str
        """
        url = self._get_request(self.id_site)
        if document_title:
            url += '&%s' % urlencode({'action_name': document_title})
        return url
    def __get_url_track_action(self, action_url, action_type):
        """
        Build the query string for a download/outlink action.
        :param action_url: URL of the download or outlink
        :type action_url: str
        :param action_type: Type of the action, either 'download' or 'link'
        :type action_type: str
        :rtype: str
        """
        url = self._get_request(self.id_site)
        url += "&%s" % urlencode({action_type: action_url})
        return url
def __get_cookie_matching_name(self, name):
"""
**NOT SUPPORTED**
Get a cookie's value by name
:param name: Cookie name
:type name: str
:rtype: str
"""
logging.warn(self.UNSUPPORTED_WARNING % '__get_cookie_matching_name()')
cookie_value = False
if self.request.COOKIES:
for name in self.request.COOKIES:
#print 'cookie name', name
#print 'cookie is', cookie_value
cookie_value = self.request.COOKIES[name]
#print self.request.COOKIES
return cookie_value
    def get_visitor_id(self):
        """
        **PARTIAL, no cookie support**
        If the user initiating the request has the Piwik first party cookie,
        this function will try and return the ID parsed from this first party
        cookie.
        If you call this function from a server, where the call is triggered by
        a cron or script not initiated by the actual visitor being tracked,
        then it will return the random Visitor ID that was assigned to this
        visit object.
        This can be used if you wish to record more visits, actions or goals
        for this visitor ID later on.
        :rtype: str
        """
        if self.forced_visitor_id:
            visitor_id = self.forced_visitor_id
        else:
            logging.warn(self.UNSUPPORTED_WARNING % 'get_visitor_id()')
            id_cookie_name = 'id.%s.' % self.id_site
            id_cookie = self.__get_cookie_matching_name(id_cookie_name)
            visitor_id = self.visitor_id
            if id_cookie:
                visitor_id = id_cookie
                # Incomplete port: the PHP client extracts the ID portion
                # before the first '.' and length-checks it:
                #$visitorId = substr($idCookie, 0, strpos($idCookie, '.'));
                #if(strlen($visitorId) == self::LENGTH_VISITOR_ID)
                #{
                #    return $visitorId;
        return visitor_id
    def get_attribution_info(self):
        """
        **NOT SUPPORTED**
        To support this we'd need to parse the cookies in the request object.
        Not sure if this makes sense...
        Return the currently assigned attribution info stored in a first party
        cookie.
        This method only works if the user is initiating the current request
        and his cookies can be read by this API.
        :rtype: string, JSON encoded string containing the referer info for
            goal conversion attribution
        """
        logging.warn(self.UNSUPPORTED_WARNING % 'get_attribution_info()')
        attribution_cookie_name = 'ref.%d.' % self.id_site
        return self.__get_cookie_matching_name(attribution_cookie_name)
def __get_random_string(self, length=500):
"""
Return a random string
:param length: Length
:type length: inte
:rtype: str
"""
return md5(os.urandom(length)).hexdigest()
def get_random_visitor_id(self):
"""
Return a random visitor ID
:rtype: str
"""
visitor_id = self.__get_random_string()
return visitor_id[:self.LENGTH_VISITOR_ID]
    def disable_cookie_support(self):
        """
        **NOT TESTED**
        Disable third-party cookie support. By default, PiwikTracker will
        read third party cookies from the response and sets them in the
        next request.
        :rtype: None
        """
        logging.warn(self.UNSUPPORTED_WARNING % 'disable_cookie_support()')
        self.cookie_support = False
    def do_track_page_view(self, document_title):
        """
        Track a page view, return the request body
        :param document_title: The title of the page the user is on
        :type document_title: str
        :rtype: str
        """
        url = self.__get_url_track_page_view(document_title)
        return self._send_request(url)
    def do_track_action(self, action_url, action_type):
        """
        Track a download or outlink
        :param action_url: URL of the download or outlink
        :type action_url: str
        :param action_type: Type of the action, either 'download' or 'link'
        :type action_type: str
        :raises: InvalidParameter if action type is unknown
        :rtype: str
        """
        # Validate before building the request; only two action types exist.
        if action_type not in ('download', 'link'):
            raise InvalidParameter("Illegal action parameter %s" % action_type)
        url = self.__get_url_track_action(action_url, action_type)
        return self._send_request(url)
def _send_request(self, url):
"""
Make the tracking API request, return the request body
:param url: TODO
:type url: str
:raises: ConfigurationError if the API URL was not set
:rtype: str
"""
if not self.api_url:
raise ConfigurationError('API URL not set')
parsed = urlparse(self.api_url)
url = "%s://%s%s?%s" % (parsed.scheme, parsed.netloc, parsed.path, url)
request = Request(url)
request.add_header('User-Agent', self.user_agent)
request.add_header('Accept-Language', self.accept_language)
if not self.cookie_support:
self.request_cookie = ''
elif self.request_cookie != '':
#print 'Adding cookie', self.request_cookie
request.add_header('Cookie', self.request_cookie)
response = urlopen(request)
#print response.info()
body = response.read()
# The cookie in the response will be set in the next request
#for header, value in response.getheaders():
# # TODO handle cookies
# # set cookie to la
# # in case several cookies returned, we keep only the latest one
# # (ie. XDEBUG puts its cookie first in the list)
# #print header, value
# self.request_cookie = ''
# Work around urllib updates, we need a string
if sys.version_info[0] >= 3 and type(body) == bytes:
body = str(body)
return body
def set_custom_variable(self, id, name, value, scope='visit'):
"""
Set a custom variable
See http://piwik.org/docs/custom-variables/
:param id: Custom variable slot ID, 1-5
:type id: int
:param name: Variable name
:type name: str
:param value: Variable value
:type value: str
:param scope: Variable scope, either visit or page,
defaults to visit
:type scope: str or None
:rtype: None
"""
if type(id) != type(int()):
raise InvalidParameter("Parameter id must be int, not %s" %
type(id))
if scope == 'page':
self.page_custom_var[id] = (name, value)
elif scope == 'visit':
self.visitor_custom_var[id] = (name, value)
else:
raise InvalidParameter("Invalid scope parameter value %s" % scope)
def set_plugins(self, **kwargs):
"""
Set supported plugins
>>> piwiktrackerinstance.set_plugins(flash=True)
See KNOWN_PLUGINS keys for valid values.
:param kwargs: A plugin: version dict, e.g. {'java': 6}, see also
KNOWN_PLUGINS
:type kwargs: dict of {str: int}
:rtype: None
"""
for plugin, version in kwargs.items():
if plugin not in list(self.KNOWN_PLUGINS.keys()):
raise ConfigurationError("Unknown plugin %s, please use one "
"of %s" % (plugin,
list(self.KNOWN_PLUGINS.keys())))
self.plugins[self.KNOWN_PLUGINS[plugin]] = int(version)
    def get_custom_variable(self, id, scope='visit'):
        """
        **PARTIAL, no cookie support**
        Returns the current custom variable stored in a first party cookie.
        :param id: Custom variable slot ID, 1-5
        :type id: int
        :param scope: Variable scope, either visit or page
        :type scope: str
        :rtype: mixed stuff TODO
        """
        if type(id) != type(int()):
            raise InvalidParameter("Parameter id must be int, not %s" %
                                   type(id))
        if scope == 'page':
            # NOTE(review): raises KeyError when the slot was never set --
            # confirm whether callers rely on that.
            r = self.page_custom_var[id]
        elif scope == 'visit':
            if self.visitor_custom_var[id]:
                r = self.visitor_custom_var[id]
            else:
                # Fallback: try to recover the variable from the first-party
                # cookie (untested port from the PHP client).
                logging.warn(self.UNSUPPORTED_WARNING %
                             'get_custom_variable()')
                # TODO test this code...
                custom_vars_cookie = 'cvar.%d.' % self.id_site
                cookie = self.__get_cookie_matching_name(custom_vars_cookie)
                if not cookie:
                    r = False
                else:
                    cookie_decoded = json.loads(cookie)
                    #$cookieDecoded = json_decode($cookie, $assoc = true);
                    # Expect a JSON object keyed by slot id with 2-tuples.
                    if type(cookie_decoded) == type(list()):
                        r = False
                    elif id not in cookie_decoded:
                        r = False
                    elif len(cookie_decoded[id]) != 2:
                        r = False
                    else:
                        r = cookie_decoded[id]
        else:
            raise InvalidParameter("Invalid scope parameter value %s" % scope)
        return r
class PiwikTrackerEcommerce(PiwikTracker):
    """
    The Piwik tracker class for ecommerce
    """
    def __init__(self, id_site, request):
        # Pending order items keyed by SKU; flushed when an order or cart
        # update is tracked.
        self.ecommerce_items = {}
        super(PiwikTrackerEcommerce, self).__init__(id_site, request)
    def __get_url_track_ecommerce_order(self, order_id, grand_total,
                                        sub_total=False, tax=False,
                                        shipping=False, discount=False):
        """
        Returns an URL used to track ecommerce orders
        Calling this method will reinitialize the property ecommerce_items to
        an empty list. So items will have to be added again via
        add_ecommerce_item().
        :param order_id: Unique order ID (required). Used to avoid
            re-recording an order on page reload.
        :type order_id: str
        :param grand_total: Grand total revenue of the transaction,
            including taxes, shipping, etc.
        :type grand_total: float
        :param sub_total: Sub total amount, typically the sum of
            item prices for all items in this order, before tax and shipping
        :type sub_total: float or None
        :param tax: Tax amount for this order
        :type tax: float or None
        :param shipping: Shipping amount for this order
        :type shipping: float or None
        :param discount: Discount for this order
        :type discount: float or None
        :rtype: str
        """
        url = self.__get_url_track_ecommerce(grand_total, sub_total, tax,
                                             shipping, discount)
        url += '&%s' % urlencode({'ec_id': order_id})
        # Remember when the last order was tracked (used by the PHP client
        # for new-visit heuristics).
        self.ecommerce_last_order_timestamp = self._get_timestamp()
        return url
def __get_url_track_goal(self, id_goal, revenue=False):
"""
Return the goal tracking URL
:param id_goal: Goal ID
:type id_goal: int
:param revenue: Revenue for this conversion
:type revenue: int (TODO why int here and not float!?)
"""
url = self._get_request(self.id_site)
params = {}
params['idgoal'] = id_goal
if revenue:
params['revenue'] = revenue
url += '&%s' % urlencode(params)
return url
    def __get_url_track_ecommerce(self, grand_total, sub_total=False,
                                  tax=False, shipping=False, discount=False):
        """
        Returns the URL used to track ecommerce orders
        Calling this method reinitializes the property ecommerce_items, so
        items will have to be added again via add_ecommerce_item()
        :param grand_total: Grand total revenue of the transaction,
            including taxes, shipping, etc.
        :type grand_total: float
        :param sub_total: Sub total amount, typically the sum of
            item prices for all items in this order, before tax and shipping
        :type sub_total: float or None
        :param tax: Tax amount for this order
        :type tax: float or None
        :param shipping: Shipping amount for this order
        :type shipping: float or None
        :param discount: Discount for this order
        :type discount: float or None
        :rtype: str
        """
        # FIXME fix what?
        url = self._get_request(self.id_site)
        # idgoal=0 marks the request as an ecommerce goal.
        args = {
            'idgoal': 0,
        }
        args['revenue'] = grand_total
        if sub_total:
            args['ec_st'] = sub_total
        if tax:
            args['ec_tx'] = tax
        if shipping:
            args['ec_sh'] = shipping
        if discount:
            args['ec_dt'] = discount
        if len(self.ecommerce_items):
            # Remove the SKU index in the list before JSON encoding
            items = list(self.ecommerce_items.values())
            args['ec_items'] = json.dumps(items)
            # Items are consumed by this request; callers re-add for the next.
            self.ecommerce_items.clear()
        url += '&%s' % urlencode(args)
        return url
def __get_url_track_ecommerce_cart_update(self, grand_total):
"""
Returns the URL to track a cart update
:type grand_total: float
:param grand_total: Grand total revenue of the transaction,
including taxes, shipping, etc.
:type grand_total: float
:rtype: str
"""
url = self.__get_url_track_ecommerce(grand_total)
return url
def add_ecommerce_item(self, sku, name=False, category=False, price=False,
quantity=1):
"""
Add an item to the ecommerce order.
This should be called before do_track_ecommerce_order() or before
do_track_ecommerce_cart_update().
This method can be called for all individual products in the
cart/order.
:param sku: Product SKU
:type SKU: str or None
:param name: Name of the product
:type name: str or None
:param category: Name of the category for the current
category page or the product
:type category: str, list or None
:param price: Price of the product
:type price: float or None
:param quantity: Product quantity, defaults to 1
:type price: int or None
:rtype: None
"""
self.ecommerce_items[sku] = (
sku,
name,
category,
price,
quantity,
)
def do_track_ecommerce_cart_update(self, grand_total):
"""
Track a cart update (add/remove/update item)
On every cart update you must call add_ecommerce_item() for each item
in the cart, including items which were in the previous cart. Items
get deleted until they are re-submitted.
:type grand_total: float
:param grand_total: Grand total revenue of the transaction,
including taxes, shipping, etc.
:type grand_total: float
:rtype: str
"""
# FIXME
url = self.__get_url_track_ecommerce_cart_update(grand_total)
return self._send_request(url)
def do_track_ecommerce_order(self, order_id, grand_total, sub_total=False,
                             tax=False, shipping=False, discount=False):
    """
    Track an ecommerce order.

    If the order contains items you must call add_ecommerce_item() first
    for each item. All revenues will be individually summed and reported
    by Piwik.

    :param order_id: Unique order ID (required). Used to avoid
        re-recording an order on page reload.
    :type order_id: str
    :param grand_total: Grand total revenue of the transaction,
        including taxes, shipping, etc.
    :type grand_total: float
    :param sub_total: Sub total amount, typically the sum of item prices
        for all items in this order, before tax and shipping
    :type sub_total: float or None
    :param tax: Tax amount for this order
    :type tax: float or None
    :param shipping: Shipping amount for this order
    :type shipping: float or None
    :param discount: Discount for this order
    :type discount: float or None
    :rtype: str
    """
    tracking_url = self.__get_url_track_ecommerce_order(
        order_id,
        grand_total,
        sub_total,
        tax,
        shipping,
        discount,
    )
    return self._send_request(tracking_url)
def do_track_goal(self, id_goal, revenue=False):
    """
    Record a goal conversion.

    :param id_goal: Goal ID
    :type id_goal: int
    :param revenue: Revenue for this conversion
    :type revenue: int (TODO why int here and not float!?)
    :rtype: str
    """
    return self._send_request(self.__get_url_track_goal(id_goal, revenue))
def set_ecommerce_view(self, sku=False, name=False, category=False,
                       price=False):
    """
    Set the page view as an item/product page view, or an ecommerce
    category page view.

    This method sets custom variables of 'page' scope with the SKU,
    name, category and price for this page view.

    On a category page you may set the category argument only.

    Tracking product/category page views will allow Piwik to report on
    product and category conversion rates.

    To enable ecommerce tracking see doc/install.rst

    :param sku: Product SKU being viewed
    :type sku: str or None
    :param name: Name of the product
    :type name: str or None
    :param category: Name of the category for the current
        category page or the product
    :type category: str, list or None
    :param price: Price of the product
    :type price: float or None
    :rtype: None
    """
    if category:
        # A list of categories is serialized to JSON; a plain string
        # passes through unchanged. isinstance() also accepts list
        # subclasses, unlike the former type() comparison.
        if isinstance(category, list):
            category = json.dumps(category)
    else:
        category = ''
    self.page_custom_var[5] = ('_pkc', category)
    if price:
        self.page_custom_var[2] = ('_pkp', price)
    # On a category page do not record "Product name not defined"
    if sku and name:
        self.page_custom_var[3] = ('_pks', sku)
        self.page_custom_var[4] = ('_pkn', name)
def piwik_get_url_track_page_view(id_site, request, document_title=''):
    """Convenience helper: track one page view in a single call."""
    return PiwikTracker(id_site, request).do_track_page_view(document_title)
|
piwik/piwik-python-api
|
piwikapi/tracking.py
|
Python
|
bsd-3-clause
| 31,981
|
[
"VisIt"
] |
99b3ad5091cfe147bfcf45b8b5936f83aaee286b5253dfe5b3b24e526f5f8c2c
|
#!/usr/bin/python
"""Test of column header output."""

from macaroon.playback import *
import utils

sequence = MacroSequence()

# Navigate into the GtkListStore demo: jump to the end of the demo list,
# expand the node, move down to the entry and activate it.
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))

# Shift+Tab onto the "Bug number" column header and check what is presented.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>ISO_Left_Tab"))
sequence.append(utils.AssertPresentationAction(
    "Bug number column header",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Bug number column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: '60482 Normal scrollable notebook', cursor=1",
     "BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Bug number table column header'",
     " VISIBLE: 'Bug number table column header', cursor=1",
     "SPEECH OUTPUT: 'Bug number column header 60482'",
     "SPEECH OUTPUT: 'Bug number table column header'"]))

# Arrow right across the remaining column headers.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
    "Severity column header",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Severity table column header'",
     " VISIBLE: 'Severity table column header', cursor=1",
     "SPEECH OUTPUT: 'Severity table column header'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
    "Description column header",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Description table column header'",
     " VISIBLE: 'Description table column header', cursor=1",
     "SPEECH OUTPUT: 'Description table column header'"]))

# Move from the headers into the table body.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "Enter table",
    ["BUG? - For some reason, the VISIBLE braille is not scrolling to the focused cell.",
     "BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Fixed? column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: '< > Fixed? 60482 Normal scrollab', cursor=1",
     "SPEECH OUTPUT: 'Fixed? column header Fixed? check box not checked 60482 Normal scrollable notebooks and hidden tabs image'"]))

# GtkTreeView swallows this keypress (for all users; not just Orca users).
sequence.append(KeyComboAction("Left"))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
    "Normal cell",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
     "SPEECH OUTPUT: 'Severity column header Normal'"]))

# Where Am I: one KP_Enter gives the basic summary...
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "Normal cell basic Where Am I",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
     "SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14'"]))

# ... and a double KP_Enter gives the detailed summary.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
    "Normal cell detailed Where Am I",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
     "BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
     "SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14'",
     "SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14 Fixed? check box not checked 60482 Normal'",
     "SPEECH OUTPUT: 'scrollable notebooks and hidden tabs'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
    "60482 cell",
    ["BRAILLE LINE: 'gtk-demo application GtkListStore demo frame table Bug number column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
     " VISIBLE: '60482 Normal scrollable notebook', cursor=1",
     "SPEECH OUTPUT: 'Bug number column header 60482'"]))

# Close the demo window and report the assertion results.
sequence.append(KeyComboAction("<Alt>F4"))

sequence.append(utils.AssertionSummaryAction())

sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/gtk-demo/role_column_header.py
|
Python
|
lgpl-2.1
| 4,996
|
[
"ORCA"
] |
81079fc2e0ca001aabf2d27159fccfcbfb31fb460c608043ce2e073d1cf5274c
|
# coding: utf-8
# Deep Learning
# =============
#
# Assignment 4
# ------------
#
# Previously in `2_fullyconnected.ipynb` and `3_regularization.ipynb`, we trained fully connected networks to classify [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) characters.
#
# The goal of this assignment is make the neural network convolutional.
# In[ ]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
import argparse
import neuralNetwork as nn
import os
import vtk
import glob
import sys
sys.path.append('../generatelib')
import inputData
print("Tensorflow version:", tf.__version__)

# Command-line interface: a trained model is mandatory; the sample to
# evaluate is either a single vtk mesh or a whole directory of vtk files
# (the two options are mutually exclusive).
parser = argparse.ArgumentParser(description='Shape Variation Analyzer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', help='Model file computed with train.py', required=True)
group = parser.add_mutually_exclusive_group()
group.add_argument('--sampleMesh', help='Evaluate an image sample in vtk format')
group.add_argument('--sampleDir', help='Evaluate a directory with vtk files')
parser.add_argument('--out', help='Write output of evaluation', default=None, type=str)
parser.add_argument('--feature_names', help='Extract the following features from the polydatas', nargs='+', default=["Normals", "Mean_Curvature", "distanceGroup"], type=str)
parser.add_argument('--num_labels', help='Number of labels', type=int, default=7)

args = parser.parse_args()

# Unpack the parsed arguments into module-level names used below.
sampleMesh = args.sampleMesh
sampleDir = args.sampleDir
outfilename = args.out
model = args.model
feature_names = args.feature_names
num_labels = args.num_labels
inputdata = inputData.inputData()

# Load the features either from a single mesh or from every vtk file in a
# directory, and flatten each sample into one feature row.
# "is not None" replaces the non-idiomatic "!= None" comparison.
if sampleMesh is not None:
    valid_dataset = inputdata.load_features(sampleMesh)
    valid_dataset = valid_dataset.reshape(1, -1)
    batch_size = 1
else:
    vtklist = glob.glob(os.path.join(sampleDir, "*.vtk"))
    valid_dataset = inputdata.load_features_classe(vtklist, feature_names=feature_names)
    valid_dataset = valid_dataset.reshape(valid_dataset.shape[0], -1)
    batch_size = valid_dataset.shape[0]

# Number of features per (flattened) sample.
size_features = valid_dataset.shape[1]

print('Validation set', valid_dataset.shape)
# Build the inference graph and run the restored model on the samples.
graph = tf.Graph()
with graph.as_default():
    # Placeholders for the flattened feature rows and the dropout
    # keep-probability (set to 1 at prediction time).
    x = tf.placeholder(tf.float32, shape=(batch_size, size_features))
    keep_prob = tf.placeholder(tf.float32)

    # Run inference on the input data and keep the most likely label.
    y_conv = nn.inference(x, size_features, num_labels, keep_prob, batch_size)
    predict = tf.argmax(y_conv, 1)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore the trained weights from the --model checkpoint.
        saver = tf.train.Saver()
        saver.restore(sess, model)
        pred = sess.run([predict], feed_dict={x: valid_dataset, keep_prob: 1})
        # "is not None" replaces the non-idiomatic "!= None" comparison.
        if outfilename is not None:
            # One predicted label per line.
            with open(outfilename, "w") as outfile:
                for pr in pred[0]:
                    outfile.write(str(pr))
                    outfile.write("\n")
        else:
            for pr in pred[0]:
                print(pr)
|
pdedumast/ShapeVariationAnalyzer
|
src/py/trainlib/prediction.py
|
Python
|
apache-2.0
| 5,070
|
[
"VTK"
] |
4ee6fea07ff36350a412ec7e3726b23fdf807c585de65ed88f1aaa5f7ec9a99d
|
''' Test_RSS_Policy_FreeDiskSpacePolicy
'''
# pylint: disable=protected-access
import unittest
import DIRAC.ResourceStatusSystem.Policy.FreeDiskSpacePolicy as moduleTested
################################################################################
class FreeDiskSpacePolicy_TestCase(unittest.TestCase):
    """Base fixture exposing the module and the class under test."""

    def setUp(self):
        '''
        Setup
        '''
        # Keep references so subclasses can instantiate the policy.
        self.moduleTested = moduleTested
        self.testClass = self.moduleTested.FreeDiskSpacePolicy

    def tearDown(self):
        '''
        Tear down
        '''
        del self.moduleTested
        del self.testClass
################################################################################
class FreeDiskSpacePolicy_Success(FreeDiskSpacePolicy_TestCase):
    """Success-path tests for FreeDiskSpacePolicy."""

    def test_instantiate(self):
        ''' tests that we can instantiate one object of the tested class
        '''
        module = self.testClass()
        self.assertEqual('FreeDiskSpacePolicy', module.__class__.__name__)

    def test_evaluate(self):
        ''' tests the method _evaluate
        '''
        module = self.testClass()

        # An unsuccessful command result maps to an Error status carrying
        # the original message as reason.
        res = module._evaluate({'OK': False, 'Message': 'Bo!'})
        self.assertTrue(res['OK'])
        self.assertEqual('Error', res['Value']['Status'])
        self.assertEqual('Bo!', res['Value']['Reason'])

        # Missing or empty values map to an Unknown status.
        res = module._evaluate({'OK': True, 'Value': None})
        self.assertTrue(res['OK'])
        self.assertEqual('Unknown', res['Value']['Status'])
        self.assertEqual('No values to take a decision', res['Value']['Reason'])

        res = module._evaluate({'OK': True, 'Value': []})
        self.assertTrue(res['OK'])
        self.assertEqual('Unknown', res['Value']['Status'])
        self.assertEqual('No values to take a decision', res['Value']['Reason'])

        # Payloads lacking the mandatory Total/Free keys are errors.
        res = module._evaluate({'OK': True, 'Value': [{'A': 1}]})
        self.assertTrue(res['OK'])
        self.assertEqual('Error', res['Value']['Status'])
        self.assertEqual('Key Total missing', res['Value']['Reason'])

        res = module._evaluate({'OK': True, 'Value': {'Total': 1}})
        self.assertTrue(res['OK'])
        self.assertEqual('Error', res['Value']['Status'])
        self.assertEqual('Key Free missing', res['Value']['Reason'])

        # 'Fre' is a deliberate typo: the Free key is still missing.
        res = module._evaluate({'OK': True, 'Value': {'Total': 100, 'Fre': 0.0}})
        self.assertTrue(res['OK'])
        self.assertEqual('Error', res['Value']['Status'])
        self.assertEqual('Key Free missing', res['Value']['Reason'])

        # Free-space thresholds: no space -> Banned, a little -> Degraded,
        # plenty -> Active.
        res = module._evaluate({'OK': True, 'Value': {'Total': 100, 'Free': 0.0}})
        self.assertTrue(res['OK'])
        self.assertEqual('Banned', res['Value']['Status'])
        self.assertEqual('Too little free space', res['Value']['Reason'])

        res = module._evaluate({'OK': True, 'Value': {'Total': 100, 'Free': 4.0,
                                                      'Guaranteed': 1}})
        self.assertTrue(res['OK'])
        self.assertEqual('Degraded', res['Value']['Status'])
        self.assertEqual('Little free space',
                         res['Value']['Reason'])

        res = module._evaluate({'OK': True, 'Value': {'Total': 100, 'Free': 100,
                                                      'Guaranteed': 1}})
        self.assertTrue(res['OK'])
        self.assertEqual('Active', res['Value']['Status'])
        self.assertEqual('Enough free space',
                         res['Value']['Reason'])
################################################################################
if __name__ == '__main__':
    # Aggregate both test cases into one suite and run it verbosely.
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(FreeDiskSpacePolicy_TestCase)
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FreeDiskSpacePolicy_Success))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)

# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/Policy/test/Test_RSS_Policy_FreeDiskSpacePolicy.py
|
Python
|
gpl-3.0
| 3,636
|
[
"DIRAC"
] |
08fb93d83c9842820580c9aee1c0380781fd2a67d1ee9d7f6657ac9af740a6e7
|
#!/usr/bin/env python
"""
This is a master vasp running script to perform various combinations of VASP
runs.
"""
from __future__ import division
import logging
import sys
import yaml
from custodian.custodian import Custodian
from custodian.vasp.jobs import VaspJob
from pymatgen.io.vasp import VaspInput, Incar, Kpoints
__author__ = "Shyue Ping Ong"
__version__ = "0.5"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "12/31/13"
def load_class(mod, name):
    """Import and instantiate a handler/validator class.

    *name* has the form "ClassName" or
    "ClassName?param1=val1,param2=val2"; parameter values are
    deserialized with yaml.

    :param mod: Module path to import the class from.
    :param name: Class specification, optionally followed by "?" and
        comma-separated keyword arguments.
    :return: An instance of the requested class.
    """
    toks = name.split("?")
    params = {}
    if len(toks) == 2:
        for p in toks[-1].split(","):
            ptoks = p.split("=")
            # SECURITY: values come from the command line; safe_load
            # avoids arbitrary object construction (yaml.load without an
            # explicit Loader is unsafe and deprecated since PyYAML 5.1).
            params[ptoks[0]] = yaml.safe_load(ptoks[1])
    elif len(toks) > 2:
        print("Bad handler specification")
        sys.exit(-1)
    # Use a fresh name instead of shadowing the *mod* parameter.
    cls_mod = __import__(mod, globals(), locals(), [toks[0]], 0)
    return getattr(cls_mod, toks[0])(**params)
def get_jobs(args):
    """Yield the VaspJob objects described by ``args.jobs``.

    Returns a generator of jobs, which also allows "infinite" job
    sequences (``full_relax`` delegates to ``VaspJob.full_opt_run``).
    """
    vasp_command = args.command.split()
    # save initial INCAR for rampU runs
    n_ramp_u = args.jobs.count('rampU')
    ramps = 0
    if n_ramp_u:
        incar = Incar.from_file('INCAR')
        ldauu = incar['LDAUU']
        ldauj = incar['LDAUJ']
    njobs = len(args.jobs)
    post_settings = []  # append to this list to have settings applied on next job
    for i, job in enumerate(args.jobs):
        final = False if i != njobs - 1 else True
        # Jobs carrying an explicit number (e.g. "relax5") keep it as the
        # suffix; otherwise jobs are numbered sequentially.
        if any(c.isdigit() for c in job):
            suffix = "." + job
        else:
            suffix = ".{}{}".format(job, i + 1)
        # Settings queued by the previous job apply to this one.
        settings = post_settings
        post_settings = []
        backup = True if i == 0 else False
        copy_magmom = False
        vinput = VaspInput.from_directory(".")
        if i > 0:
            # Continue from the previous job's relaxed structure.
            settings.append(
                {"file": "CONTCAR",
                 "action": {"_file_copy": {"dest": "POSCAR"}}})
        job_type = job.lower()
        auto_npar = True
        if args.no_auto_npar:
            auto_npar = False
        if job_type.startswith("static_derived"):
            from pymatgen.io.vasp.sets import MPStaticSet
            vis = MPStaticSet.from_prev_calc(
                ".", user_incar_settings={"LWAVE": True, "EDIFF": 1e-6},
                ediff_per_atom=False)
            settings.extend([
                {"dict" : "INCAR",
                 "action": {"_set": dict(vis.incar)}},
                {'dict': 'KPOINTS',
                 'action': {'_set': vis.kpoints.as_dict()}}])
        if job_type.startswith("static_dielectric_derived"):
            from pymatgen.io.vasp.sets import MPStaticSet, MPStaticDielectricDFPTVaspInputSet
            # vis = MPStaticSet.from_prev_calc(
            #     ".", user_incar_settings={"EDIFF": 1e-6, "IBRION": 8,
            #                               "LEPSILON": True, 'LREAL':False,
            #                               "LPEAD": True, "ISMEAR": 0,
            #                               "SIGMA": 0.01},
            #     ediff_per_atom=False)
            vis = MPStaticDielectricDFPTVaspInputSet()
            incar = vis.get_incar(vinput["POSCAR"].structure)
            unset = {}
            # Drop keys that clash with the DFPT dielectric run and
            # remember which of them must be unset in the existing INCAR.
            for k in ["NPAR", "KPOINT_BSE", "LAECHG", "LCHARG", "LVHAR",
                      "NSW"]:
                incar.pop(k, None)
                if k in vinput["INCAR"]:
                    unset[k] = 1
            kpoints = vis.get_kpoints(vinput["POSCAR"].structure)
            settings.extend([
                {"dict": "INCAR",
                 "action": {"_set": dict(incar),
                            "_unset": unset}},
                {'dict': 'KPOINTS',
                 'action': {'_set': kpoints.as_dict()}}])
            auto_npar = False
        elif job_type.startswith("static"):
            # Scale the k-point mesh of the preceding run.
            m = [i * args.static_kpoint for i in vinput["KPOINTS"].kpts[0]]
            settings.extend([
                {"dict": "INCAR",
                 "action": {"_set": {"NSW": 0}}},
                {'dict': 'KPOINTS',
                 'action': {'_set': {'kpoints': [m]}}}])
        elif job_type.startswith("nonscf_derived"):
            from pymatgen.io.vasp.sets import MPNonSCFSet
            vis = MPNonSCFSet.from_prev_calc(".", copy_chgcar=False,
                                             user_incar_settings={"LWAVE": True})
            settings.extend([
                {"dict": "INCAR",
                 "action": {"_set": dict(vis.incar)}},
                {'dict': 'KPOINTS',
                 'action': {'_set': vis.kpoints.as_dict()}}])
        elif job_type.startswith("optics_derived"):
            from pymatgen.io.vasp.sets import MPNonSCFSet
            vis = MPNonSCFSet.from_prev_calc(
                ".", optics=True, copy_chgcar=False,
                nedos=2001, mode="uniform", nbands_factor=5,
                user_incar_settings={"LWAVE": True, "ALGO": "Exact", "SIGMA": 0.01, "EDIFF": 1e-6},
                ediff_per_atom=False)
            settings.extend([
                {"dict": "INCAR",
                 "action": {"_set": dict(vis.incar)}},
                {'dict': 'KPOINTS',
                 'action': {'_set': vis.kpoints.as_dict()}}])
        elif job_type.startswith("rampu"):
            # Ramp the Hubbard U/J values linearly over the rampU jobs.
            f = ramps / (n_ramp_u - 1)
            settings.append(
                {"dict": "INCAR",
                 "action": {"_set": {"LDAUJ": [j * f for j in ldauj],
                                     "LDAUU": [u * f for u in ldauu]}}})
            copy_magmom = True
            ramps += 1
        elif job_type.startswith("quick_relax") or job_type.startswith(\
                "quickrelax"):
            kpoints = vinput["KPOINTS"]
            incar = vinput["INCAR"]
            structure = vinput["POSCAR"].structure
            # Restore the original smearing on the next job.
            if "ISMEAR" in incar:
                post_settings.append(
                    {"dict": "INCAR",
                     "action": {"_set": {"ISMEAR": incar["ISMEAR"]}}})
            else:
                post_settings.append(
                    {"dict": "INCAR",
                     "action": {"_unset": {"ISMEAR": 1}}})
            post_settings.append({"dict": "KPOINTS",
                                  "action": {"_set": kpoints.as_dict()}})
            # lattice vectors with length < 9 will get >1 KPOINT
            low_kpoints = Kpoints.gamma_automatic(
                [max(int(18/l), 1) for l in structure.lattice.abc])
            settings.extend([
                {"dict": "INCAR",
                 "action": {"_set": {"ISMEAR": 0}}},
                {'dict': 'KPOINTS',
                 'action': {'_set': low_kpoints.as_dict()}}])
            # let vasp determine encut (will be lower than
            # needed for compatibility with other runs)
            if "ENCUT" in incar:
                post_settings.append(
                    {"dict": "INCAR",
                     "action": {"_set": {"ENCUT": incar["ENCUT"]}}})
                settings.append(
                    {"dict": "INCAR",
                     "action": {"_unset": {"ENCUT": 1}}})
        elif job_type.startswith("relax"):
            pass
        elif job_type.startswith("full_relax"):
            for j in VaspJob.full_opt_run(
                    vasp_command):
                yield j
        else:
            print("Unsupported job type: {}".format(job))
            sys.exit(-1)
        if not job_type.startswith("full_relax"):
            yield VaspJob(vasp_command, final=final, suffix=suffix,
                          backup=backup, settings_override=settings,
                          copy_magmom=copy_magmom, auto_npar=auto_npar)
def do_run(args):
    """Configure logging, build handlers/validators and run Custodian.

    :param args: Parsed argparse namespace from main().
    """
    FORMAT = '%(asctime)s %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.INFO, filename="run.log")
    # Lazy %-style arguments: the message is only formatted when the
    # record is actually emitted (was eager "..." % args.handlers).
    logging.info("Handlers used are %s", args.handlers)
    handlers = [load_class("custodian.vasp.handlers", n) for n in
                args.handlers]
    validators = [load_class("custodian.vasp.validators", n) for n in
                  args.validators]
    c = Custodian(handlers, get_jobs(args), validators,
                  max_errors=args.max_errors, scratch_dir=args.scratch,
                  gzipped_output=args.gzip,
                  checkpoint=True)
    c.run()
def main():
    """Parse the command line and hand the arguments to do_run()."""
    import argparse
    parser = argparse.ArgumentParser(description="""
    run_vasp is a master script to perform various kinds of VASP runs.
    """,
                                     epilog="""
    Author: Shyue Ping Ong
    Version: {}
    Last updated: {}""".format(__version__, __date__))

    parser.add_argument(
        "-c", "--command", dest="command", nargs="?",
        default="pvasp", type=str,
        help="VASP command. Defaults to pvasp. If you are using mpirun, "
             "set this to something like \"mpirun pvasp\".")

    parser.add_argument(
        "--no_auto_npar", action="store_true",
        help="Set to true to turn off auto_npar. Useful for certain machines "
             "and calculations where you want absolute control.")

    parser.add_argument(
        "-z", "--gzip", dest="gzip", action="store_true",
        help="Add this option to gzip the final output. Do not gzip if you "
             "are going to perform an additional static run."
    )

    parser.add_argument(
        "-s", "--scratch", dest="scratch", nargs="?",
        default=None, type=str,
        help="Scratch directory to perform run in. Specify the root scratch "
             "directory as the code will automatically create a temporary "
             "subdirectory to run the job.")

    parser.add_argument(
        "-ks", "--kpoint-static", dest="static_kpoint", nargs="?",
        default=1, type=int,
        help="The multiplier to use for the KPOINTS of a static run (if "
             "any). For example, setting this to 2 means that if your "
             "original run was done using a k-point grid of 2x3x3, "
             "the static run will be done with a k-point grid of 4x6x6. This "
             "defaults to 1, i.e., static runs are performed with the same "
             "k-point grid as relaxation runs."
    )

    parser.add_argument(
        "-me", "--max-errors", dest="max_errors", nargs="?",
        default=10, type=int,
        help="Maximum number of errors to allow before quitting")

    parser.add_argument(
        "-hd", "--handlers", dest="handlers", nargs="+",
        default=["VaspErrorHandler", "MeshSymmetryErrorHandler",
                 "UnconvergedErrorHandler", "NonConvergingErrorHandler",
                 "PotimErrorHandler"], type=str,
        help="The ErrorHandlers to use specified as string class names, "
             "with optional arguments specified as a url-like string. For "
             "example, VaspErrorHandler?output_filename=myfile.out specifies a "
             "VaspErrorHandler with output_name set to myfile.out. Multiple "
             "arguments are joined by a comma. E.g., MyHandler?myfile=a,"
             "data=1. The arguments are deserialized using yaml."
    )

    parser.add_argument(
        "-vd", "--validators", dest="validators", nargs="+",
        default=["VasprunXMLValidator"], type=str,
        help="The Validators to use specified as string class names, "
             "with optional arguments specified as a url-like string. For "
             "example, VaspErrorHandler?output_filename=myfile.out specifies a "
             "VaspErrorHandler with output_name set to myfile.out. Multiple "
             "arguments are joined by a comma. E.g., MyHandler?myfile=a,"
             "data=1. The arguments are deserialized using yaml."
    )

    parser.add_argument(
        "jobs", metavar="jobs", type=str, nargs='+',
        default=["relax", "relax"],
        help="Jobs to execute. Only sequences of relax, "
             "quickrelax, static, rampU, full_relax, static_derived, "
             "nonscf_derived, optics_derived are "
             "supported at the moment. For example, \"relax relax static\" "
             "will run a double relaxation followed by a static "
             "run. By default, suffixes are given sequential numbering,"
             "but this can be overrridden by adding a number to the job"
             "type, e.g. relax5 relax6 relax7")

    args = parser.parse_args()
    do_run(args)
|
xhqu1981/custodian
|
custodian/cli/run_vasp.py
|
Python
|
mit
| 12,242
|
[
"VASP",
"pymatgen"
] |
b1ffd875ec9031d2b0778cfd0c5c80328427c28d81a16629668ad43dd9cbec25
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.calculators.XTB import XTB
from pysisyphus.helpers import geom_from_xyz_file
from pysisyphus.optimizers.hessian_updates import bfgs_update, flowchart_update
def rms(arr):
    """Root-mean-square of the array elements."""
    return np.sqrt((arr ** 2).mean())
def max_(arr):
    """Largest absolute value in *arr*.

    The original ``np.abs(arr.max())`` took the absolute value *after*
    the maximum, which is wrong whenever the entry of largest magnitude
    is negative (e.g. [-5, 2] -> 2 instead of 5). Used as the
    max-gradient convergence measure, so the magnitude is what matters.
    """
    return np.abs(arr).max()
def rfo(gradient, H, trust=None):
    """Rational function optimization step, optionally trust-radius capped.

    Builds the gradient-augmented Hessian, takes the eigenvector of its
    lowest eigenvalue and rescales it by its last component (lambda) to
    obtain the step.

    :param gradient: 1d gradient vector of length n.
    :param H: (n, n) Hessian matrix.
    :param trust: Optional maximum step norm.
    :return: 1d step vector of length n.
    """
    # np.bmat is deprecated; np.block builds the same augmented matrix
    # as a plain ndarray.
    H_aug = np.block([
        [H, gradient[:, None]],
        [gradient[None, :], np.zeros((1, 1))],
    ])
    # Symmetrize against round-off before the symmetric eigensolver.
    eigvals, eigvecs = np.linalg.eigh((H_aug + H_aug.T) / 2)
    # Eigenvector of the lowest eigenvalue; the division by lambda_ also
    # cancels the arbitrary sign of the eigenvector.
    aug_step = np.asarray(eigvecs[:, 0]).flatten()
    lambda_ = aug_step[-1]
    step = aug_step[:-1] / lambda_
    if trust:
        # Reuse the norm instead of computing it twice as before.
        norm = np.linalg.norm(step)
        if norm > trust:
            step = step / norm * trust
    return step
def update_trust_radius(trust_radius, coeff, last_step_norm):
    """Adapt the trust radius from the predicted/actual energy ratio.

    Follows Nocedal, Numerical Optimization, chapter 4, algorithm 4.1.
    """
    if coeff < 0.25:
        # Poor prediction: shrink, but never below 0.1.
        # trust_radius = max(trust_radius/4, 0.001)
        return max(trust_radius / 4, 0.1)  # 0.001)
    # Good prediction: only grow when the last step actually reached the
    # trust-radius boundary.
    # See [5], Appendix, step size and direction control
    # elif coeff > 0.75 and (last_step_norm >= .8*trust_radius):
    if coeff > 0.75 and abs(last_step_norm - trust_radius) < 1e-6:
        return min(trust_radius * 1.414, 2)
    # elif coeff > 0.75 and (last_step_norm >= .9*trust_radius):
    #     trust_radius = min(trust_radius*1.414, 2)
    return trust_radius
def run():
    """Trust-radius RFO optimization of azetidine in mass-weighted
    normal coordinates, using an XTB calculator.

    Fixes over the original:
    - removed a leftover ``import pdb; pdb.set_trace()`` debug
      breakpoint that halted every run;
    - the trust-ratio now uses the freshly assigned ``pred`` (identical
      in value to the previously flagged-as-undefined ``predicted``,
      which carried a ``noqa: F821``);
    - removed unused locals (``step_norm``, ``pot``).
    """
    # geom = AnaPot.get_geom((-0.366, 2.03, 0))
    geom = geom_from_xyz_file("azetidine_guess.xyz")
    geom.set_calculator(XTB())
    H = geom.get_initial_hessian()
    M = geom.mm_sqrt_inv

    trust = 0.8
    max_cycles = 75
    steps = list()
    gradients = list()
    coords = list()
    energies = list()
    pred_changes = list()

    trj = open("opt.trj", "w")
    for i in range(max_cycles):
        coords.append(geom.coords.copy())
        trj.write(geom.as_xyz() + "\n")
        g = geom.gradient
        gradients.append(g)
        energies.append(geom.energy)

        if i > 0:
            last_step_norm = np.linalg.norm(steps[-1])
            # Compare the actual energy change against the quadratic
            # model prediction stored at the end of the previous cycle.
            pred = pred_changes[-1]
            actual = energies[-1] - energies[-2]
            coeff = pred / actual
            trust = update_trust_radius(trust, coeff, last_step_norm)
            # Hessian update from the last step and gradient difference.
            dg = gradients[-1] - gradients[-2]
            dx = steps[-1]
            # dH, _ = bfgs_update(H, dx, dg)
            dH, _ = flowchart_update(H, dx, dg)
            H = H + dH

        # Transform to mass-weighted normal coordinates, take the RFO
        # step there and transform the step back to cartesians.
        mwH = M.dot(H).dot(M)
        vm, vemT = np.linalg.eigh(mwH)
        S = M.dot(vemT)
        gqm = S.T.dot(g)
        Hm_diag = np.diag(vm)
        dqm = rfo(gqm, Hm_diag)
        cart_step = S.dot(dqm)
        norm = np.linalg.norm(cart_step)
        if norm > trust:
            cart_step = cart_step / norm * trust

        rms_g = rms(g)
        max_g = max_(g)
        rms_s = rms(cart_step)
        max_s = max_(cart_step)
        norm_g = np.linalg.norm(g)
        print(f"{i:02d}: {max_g:.6f} {rms_g:.6f} {max_s:.6f} {rms_s:.6f} {norm_g:.6f} {geom.energy:.6f} {trust:.6f}")

        # Gradient-only convergence criteria.
        converged = (max_g <= 4.5e-4) and (rms_g <= 3e-4)  # \
        # and (max_s <= 1.8e-3) and (rms_s <= 1.2e-3)
        if converged:
            print("Converged!")
            break

        steps.append(cart_step)
        new_coords = geom.coords + cart_step
        geom.coords = new_coords
        # Energy change predicted by the quadratic model for this step;
        # consumed by the trust-radius update in the next cycle.
        predicted = cart_step.dot(g) + 0.5 * cart_step.dot(H).dot(cart_step)
        pred_changes.append(predicted)

    print(f"Energy: {geom.energy:.8f}")
    trj.close()


if __name__ == "__main__":
    run()
|
eljost/pysisyphus
|
tests_staging/test_ancopt/test_ancopt.py
|
Python
|
gpl-3.0
| 4,927
|
[
"xTB"
] |
a6c1333870ad2f1f22ec03ca47ce8ebe02136254db522e6cb2cfbead0385f3d8
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome History database plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import chrome as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_history
from tests.parsers.sqlite_plugins import test_lib
class GoogleChrome8HistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Google Chrome 8 history SQLite database plugin."""

  def testProcess(self):
    """Tests the Process function on a Chrome History database file."""
    plugin = chrome_history.GoogleChrome8HistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['History'], plugin)

    self.assertEqual(storage_writer.number_of_warnings, 0)

    # The History file contains 71 events (69 page visits, 1 file downloads).
    self.assertEqual(storage_writer.number_of_events, 71)

    events = list(storage_writer.GetEvents())

    # Check the first page visited entry.
    event = events[0]

    self.CheckTimestamp(event.timestamp, '2011-04-07 12:03:11.000000')
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)

    event_data = self._GetEventDataOfEvent(storage_writer, event)

    expected_url = 'http://start.ubuntu.com/10.04/Google/'
    self.assertEqual(event_data.url, expected_url)
    self.assertEqual(event_data.title, 'Ubuntu Start Page')

    # Verify the long and short formatter output for the page visit.
    expected_message = (
        '{0:s} '
        '(Ubuntu Start Page) [count: 0] '
        'Visit Source: [SOURCE_FIREFOX_IMPORTED] Type: [LINK - User clicked '
        'a link] (URL not typed directly - no typed count)').format(
            expected_url)
    expected_short_message = '{0:s} (Ubuntu Start Page)'.format(expected_url)
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)

    # Check the first file downloaded entry.
    event = events[69]

    self.CheckTimestamp(event.timestamp, '2011-05-23 08:35:30.000000')
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)

    event_data = self._GetEventDataOfEvent(storage_writer, event)

    expected_url = (
        'http://fatloss4idiotsx.com/download/funcats/'
        'funcats_scr.exe')
    self.assertEqual(event_data.url, expected_url)

    expected_full_path = '/home/john/Downloads/funcats_scr.exe'
    self.assertEqual(event_data.full_path, expected_full_path)

    # Verify the long and short formatter output for the download.
    expected_message = (
        '{0:s} ({1:s}). '
        'Received: 1132155 bytes out of: '
        '1132155 bytes.').format(expected_url, expected_full_path)
    expected_short_message = '{0:s} downloaded (1132155 bytes)'.format(
        expected_full_path)
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
class GoogleChrome27HistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Google Chrome 27 history SQLite database plugin."""

  def _CheckSpecimensHistoryDatabase(
      self, file_name, visit_timestamp, download_timestamp):
    """Parses a chrome-specimens History database and checks its two events.

    Every specimen database tested here contains the same page visit and the
    same file download; only the database file name and the two event
    timestamps differ per Chrome version.  The per-version test methods were
    four near-identical copies of this body, so the shared checks live here.

    Args:
      file_name (str): name of the test History database file.
      visit_timestamp (str): expected page visit timestamp.
      download_timestamp (str): expected file download timestamp.
    """
    plugin = chrome_history.GoogleChrome27HistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin([file_name], plugin)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    # The History file contains 2 events (1 page visit, 1 file download).
    self.assertEqual(storage_writer.number_of_events, 2)

    events = list(storage_writer.GetEvents())

    # Check the page visit event.
    event = events[0]
    self.CheckTimestamp(event.timestamp, visit_timestamp)
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)

    event_data = self._GetEventDataOfEvent(storage_writer, event)

    expected_url = (
        'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
        'generate-specimens.sh')
    self.assertEqual(event_data.url, expected_url)
    self.assertEqual(event_data.title, '')

    expected_message = (
        '{0:s} '
        '[count: 0] '
        'Type: [START_PAGE - The start page of the browser] '
        '(URL not typed directly - no typed count)').format(expected_url)
    expected_short_message = '{0:s}...'.format(expected_url[:77])

    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)

    # Check the file downloaded event.
    event = events[1]
    self.CheckTimestamp(event.timestamp, download_timestamp)
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)

    event_data = self._GetEventDataOfEvent(storage_writer, event)

    expected_url = (
        'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
        'win32/plaso-20171231.1.win32.msi')
    self.assertEqual(event_data.url, expected_url)

    expected_full_path = '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi'
    self.assertEqual(event_data.full_path, expected_full_path)

    expected_message = (
        '{0:s} ({1:s}). '
        'Received: 3080192 bytes out of: 3080192 bytes.').format(
            expected_url, expected_full_path)
    expected_short_message = '{0:s} downloaded (3080192 bytes)'.format(
        expected_full_path)

    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)

  def testProcess57(self):
    """Tests the Process function on a Google Chrome 57 History database."""
    self._CheckSpecimensHistoryDatabase(
        'History-57.0.2987.133', '2018-01-21 14:09:53.885478',
        '2018-01-21 14:09:53.900399')

  def testProcess58(self):
    """Tests the Process function on a Google Chrome 58 History database."""
    self._CheckSpecimensHistoryDatabase(
        'History-58.0.3029.96', '2018-01-21 14:09:27.315765',
        '2018-01-21 14:09:27.200398')

  def testProcess59(self):
    """Tests the Process function on a Google Chrome 59 History database."""
    self._CheckSpecimensHistoryDatabase(
        'History-59.0.3071.86', '2018-01-21 14:08:52.037692',
        '2018-01-21 14:08:51.811123')

  def testProcess59ExtraColumn(self):
    """Tests the Process function on a Google Chrome 59 History database,
    manually modified to have an unexpected column.
    """
    self._CheckSpecimensHistoryDatabase(
        'History-59_added-fake-column', '2018-01-21 14:08:52.037692',
        '2018-01-21 14:08:51.811123')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
|
rgayon/plaso
|
tests/parsers/sqlite_plugins/chrome_history.py
|
Python
|
apache-2.0
| 12,609
|
[
"VisIt"
] |
03e03e3909539b93fc01157d7e3f42d4622720324a31375812357b7ea2e191c2
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImagePadFilter(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkImagePadFilter.

    Auto-generated by createDeVIDEModuleFromVTKObject (see header comment);
    exposes one vtkImageData input and one vtkImageData output.
    """

    def __init__(self, module_manager):
        # Delegate all wiring to the mixin: the wrapped VTK object, the
        # 'Processing.' category, matching input/output port types, and
        # replaceDoc=True so the VTK class documentation is used as-is.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkImagePadFilter(), 'Processing.',
            ('vtkImageData',), ('vtkImageData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkImagePadFilter.py
|
Python
|
bsd-3-clause
| 491
|
[
"VTK"
] |
d6422565e6fee860e69feaef9cb8b86502c34b842b0b5dcd54e38d89e064aa06
|
from __future__ import division, unicode_literals
import warnings
import matplotlib
matplotlib.use('pdf')
import unittest as unittest
import numpy as np
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
GrandPotentialPhaseDiagram
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
    """Unit tests for the InterfacialReactivity analysis class."""

    def setUp(self):
        """Builds a Li-Mn-O phase diagram, a Li-open grand-potential phase
        diagram, and the list of InterfacialReactivity fixtures (self.ir)."""
        self.entries = [ComputedEntry(Composition('Li'), 0),
                        ComputedEntry(Composition('Mn'), 0),
                        ComputedEntry(Composition('O2'), 0),
                        ComputedEntry(Composition('MnO2'), -10),
                        ComputedEntry(Composition('Mn2O4'), -60),
                        ComputedEntry(Composition('MnO3'), 20),
                        ComputedEntry(Composition('Li2O'), -10),
                        ComputedEntry(Composition('Li2O2'), -8),
                        ComputedEntry(Composition('LiMnO2'), -30)
                        ]
        self.pd = PhaseDiagram(self.entries)
        # Grand-potential diagram with Li open at chemical potential -3.
        chempots = {'Li': -3}
        self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
        self.ir = []
        self.ir.append(
            InterfacialReactivity(Composition('O2'), Composition('Mn'), self.pd,
                                  norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
                                  self.gpd, norm=0, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('O2'),
                                  self.gpd, norm=1, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
                                  self.gpd, norm=0, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('O2'),
                                  self.gpd, norm=1, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
                                  self.gpd, norm=1, include_no_mixing_energy=1,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('Li'), self.pd,
                                  norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=True))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('Li'), self.pd,
                                  norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=None, use_hull_energy=False))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'), self.gpd,
                                  norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=True))
        self.ir.append(
            InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'), self.gpd,
                                  norm=0, include_no_mixing_energy=0,
                                  pd_non_grand=self.pd, use_hull_energy=False))
        # Requesting no_mixing_energy without a grand phase diagram must fail.
        with self.assertRaises(Exception) as context1:
            self.ir.append(
                InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
                                      self.pd, norm=0,
                                      include_no_mixing_energy=1,
                                      pd_non_grand=None))
        self.assertTrue(
            'Please provide grand phase diagram to compute no_mixing_energy!' == str(
                context1.exception))
        # ... and conversely without a non-grand phase diagram.
        with self.assertRaises(Exception) as context2:
            self.ir.append(
                InterfacialReactivity(Composition('O2'), Composition('Mn'),
                                      self.gpd, norm=0,
                                      include_no_mixing_energy=1,
                                      pd_non_grand=None))
        self.assertTrue(
            'Please provide non-grand phase diagram to compute no_mixing_energy!' == str(
                context2.exception))

    def test_get_entry_energy(self):
        """Checks entry-energy lookup, including the hull-energy fallback."""
        # Test warning message.
        comp = Composition('MnO3')
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            energy = self.ir[0]._get_entry_energy(self.pd, comp)
            self.assertTrue(len(w) == 1)
            self.assertTrue("The reactant MnO3 has no matching entry with negative"
                            " formation energy, instead convex hull energy for this"
                            " composition will be used for reaction energy calculation."
                            in str(w[-1].message))
        test1 = np.isclose(energy, -30, atol=1e-03)
        self.assertTrue(test1,
                        '_get_entry_energy: energy for {} is wrong!'.format(
                            comp.reduced_formula))
        # Test normal functionality
        comp = Composition('MnO2')
        test2 = np.isclose(self.ir[0]._get_entry_energy(self.pd, comp), -30,
                           atol=1e-03)
        self.assertTrue(test2,
                        '_get_entry_energy: energy for {} is wrong!'.format(
                            comp.reduced_formula))

    def test_get_grand_potential(self):
        """Checks grand-potential values in (non-)normalized and hull modes."""
        comp = Composition('LiMnO2')
        # Test non-normalized case
        test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
                           atol=1e-03)
        self.assertTrue(test1,
                        '_get_grand_potential: Non-normalized case gets error!')
        # Test normalized case
        test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -9,
                           atol=1e-03)
        self.assertTrue(test2,
                        '_get_grand_potential: Normalized case gets error!')
        comp2 = Composition('Li2O2')
        # Test use_hull_energy option.
        test3 = np.isclose(self.ir[8]._get_grand_potential(comp2), -4,
                           atol=1e-03)
        self.assertTrue(test3,
                        '_get_grand_potential: get hull energy gets error!')
        test4 = np.isclose(self.ir[9]._get_grand_potential(comp2), -2,
                           atol=1e-03)
        self.assertTrue(test4,
                        '_get_grand_potential: gets error for {}!'.format(
                            comp2.reduced_formula))

    def test_get_energy(self):
        """Checks mixing energies at selected mixing ratios."""
        test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
        self.assertTrue(test1, '_get_energy: phase diagram gets error!')
        test2 = (
            np.isclose(self.ir[3]._get_energy(0.6666666), -7.333333, atol=1e-03))
        self.assertTrue(test2,
                        '_get_energy: grand canonical phase diagram gets error!')
        test3 = (
            np.isclose(self.ir[6]._get_energy(0.3333333), -3.333333, atol=1e-03))
        self.assertTrue(test3,
                        '_get_energy: convex hull energy gets error. ')
        test4 = (
            np.isclose(self.ir[7]._get_energy(0.3333333), -4, atol=1e-03))
        self.assertTrue(test4,
                        '_get_energy: gets error. ')

    def test_get_reaction(self):
        """Checks reaction strings, with and without normalization."""
        test1 = str(self.ir[0]._get_reaction(0.5)) == 'O2 + Mn -> MnO2'
        self.assertTrue(test1,
                        '_get_reaction: reaction not involving chempots species gets error!')
        test2 = str(self.ir[0]._get_reaction(0.5,
                                             normalize=1)) == '0.5 O2 + 0.5 Mn -> 0.5 MnO2'
        self.assertTrue(test2,
                        '_get_reaction: reaction not involving chempots species gets error!')
        # BUG FIX: the original wrote `... == 'a' or 'b'`, which is always
        # truthy because the `or` applies after the comparison.  Product
        # ordering is not guaranteed, so use a membership test instead.
        test3 = str(self.ir[3]._get_reaction(0.666666)) in (
            '2 Mn + 2 Li2O -> 4 Li + MnO2 + Mn',
            '2 Mn + 2 Li2O -> 4 Li + Mn + MnO2')
        self.assertTrue(test3,
                        '_get_reaction: reaction involving chempots species gets error!')

    def test_convert(self):
        """Checks mixing-ratio conversion."""
        test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
        result = [self.ir[0]._convert(x, f1, f2) for x, f1, f2 in test_array]
        answer = [0.75, 0.5, 0, 1]
        self.assertTrue(np.allclose(result, answer),
                        '_convert: conversion gets error! {0} expected, but gets {1}'.format(
                            answer, result))

    def test_reverse_convert(self):
        """Checks the inverse mixing-ratio conversion."""
        test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
        result = [self.ir[0]._reverse_convert(x, f1, f2) for x, f1, f2 in
                  test_array]
        answer = [0.25, 0.3076923, 0, 1]
        self.assertTrue(np.allclose(result, answer),
                        '_convert: conversion gets error! {0} expected, but gets {1}'.format(
                            answer, result))

    def test_get_products(self):
        """Checks decomposition product lists (order-insensitive)."""
        test1 = sorted(self.ir[0].get_products()) == sorted(
            ['MnO2', 'O2', 'Mn'])
        self.assertTrue(test1,
                        'get_products: decomposition products gets error for reaction not involving chempots species!')
        test2 = sorted(self.ir[3].get_products()) == sorted(
            ['Li', 'MnO2', 'Mn', 'Li2O'])
        self.assertTrue(test2,
                        'get_decomp: decomposition products gets error for reaction involving chempots species!')

    def test_get_kinks(self):
        """Checks kink indices, mixing ratios, energies and reactions."""
        ir = self.ir[0]
        lst = list(ir.get_kinks())
        index = [i[0] for i in lst]
        x_kink = [i[1] for i in lst]
        energy_kink = [i[2] for i in lst]
        react_kink = [str(i[3]) for i in lst]
        test1 = index == [1, 2, 3]
        self.assertTrue(test1, 'get_kinks:index gets error!')
        test2 = np.allclose(x_kink, [0, 0.5, 1])
        self.assertTrue(test2, 'get_kinks:x kinks gets error!')
        test3 = np.allclose(energy_kink, [0, -15, 0])
        self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
        test4 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
        self.assertTrue(test4,
                        'get_kinks:reaction kinks gets error for {0} and {1} reaction!'.format(
                            ir.c1_original.reduced_formula,
                            ir.c2_original.reduced_formula))

    def test_labels(self):
        """Checks the kink label dictionary."""
        ir = self.ir[0]
        # Renamed from `dict`, which shadowed the builtin.
        labels = ir.labels()
        test1 = labels == {1: 'x= 0.0 energy = 0.0 Mn -> Mn',
                           2: 'x= 0.5 energy = -15.0 O2 + Mn -> MnO2',
                           3: 'x= 1.0 energy = 0.0 O2 -> O2'}
        self.assertTrue(test1,
                        'labels:label does not match for interfacial system with {0} and {1}.'.format(
                            ir.c1_original.reduced_formula,
                            ir.c2_original.reduced_formula))

    def test_plot(self):
        """Smoke test: plotting is hard to verify, just ensure no exception."""
        for i in self.ir:
            i.plot()

    def test_minimum(self):
        """Checks the (mixing ratio, energy) minimum of each fixture."""
        answer = [
            (0.5, -15),
            (0, 0),
            (0.3333333, -10),
            (0.6666666, -7.333333),
            (0.3333333, -7.333333),
            (0.1428571, -7.333333),
            (0.3333333, -3.333333),
            (0.3333333, -4.0),
        ]
        for i, j in zip(self.ir, answer):
            self.assertTrue(np.allclose(i.minimum(), j),
                            'minimum: the system with {0} and {1} gets error!{2} expected, but gets {3}'.format(
                                i.c1_original.reduced_formula,
                                i.c2_original.reduced_formula, str(j),
                                str(i.minimum())))

    def test_get_no_mixing_energy(self):
        """Checks no-mixing energies and the error path without a grand pd."""
        with self.assertRaises(Exception) as context1:
            self.ir[0].get_no_mixing_energy()
        self.assertTrue(
            'Please provide grand potential phase diagram for computing no_mixing_energy!' == str(
                context1.exception))
        answer = [
            [(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
            [(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
            [(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
            [(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
            [(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
        ]

        def name_lst(lst):
            # Pair of species labels from a no-mixing-energy result.
            return (lst[0][0], lst[1][0])

        def energy_lst(lst):
            # Pair of energies from a no-mixing-energy result.
            return (lst[0][1], lst[1][1])

        result_info = [i.get_no_mixing_energy() for i in self.ir if i.grand]
        for i, j in zip(result_info, answer):
            self.assertTrue(name_lst(i) == name_lst(j),
                            'get_no_mixing_energy: names get error, {0} expected but gets {1}'.format(
                                name_lst(j), name_lst(i)))
            self.assertTrue(np.allclose(energy_lst(i), energy_lst(j)),
                            'get_no_mixing_energy: no_mixing energies get error, {0} expected but gets {1}'.format(
                                energy_lst(j), energy_lst(i)))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
gpetretto/pymatgen
|
pymatgen/analysis/tests/test_interface_reactions.py
|
Python
|
mit
| 13,779
|
[
"pymatgen"
] |
14b68b8f91490ae13b43adc90db8de9e2570c39f575cbf76e59412b722b5c1ce
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
"""
Twitter's wrapper around the optparse module for doing more stateless builder-style
options parsing.
Typical usage:
from twitter.common import options
base_parser = options.parser()
my_opts = [
options.Option(...),
options.Option(...)
]
my_foo_opts = [
options.Option(...),
options.Option(...)
]
group = base_parser.new_group('foo')
group.add_option(*my_foo_opts)
parser = base_parser
.options(my_opts)
.groups([group])
.interspersed_arguments(True)
.usage("blah blah blah"))
values, rargs = parser.parse()
"""
__author__ = 'Brian Wickman'
import copy
import sys
import inspect
import types
from optparse import (
OptionParser,
OptionValueError,
Option,
OptionGroup,
Values,
NO_DEFAULT
)
def parser():
  """Return a fresh, empty TwitterOptionParser builder."""
  return TwitterOptionParser()

def new_group(name):
  """Return a new named TwitterOptionGroup."""
  return TwitterOptionGroup(name)

# Short alias for new_group, kept for the public API below.
group = new_group

__all__ = [
  'parser',
  'new_group',
  'group', # alias for new_group
  'Option',
  'Values'
]
class TwitterOptionGroup(object):
  """A named, ordered collection of optparse Options.

  Used by TwitterOptionParser.groups() to attach a titled option group to
  the underlying OptionParser.
  """

  def __init__(self, name):
    self._name = name
    self._option_list = []

  def add_option(self, *option):
    """Append one or more Options to the end of the group."""
    for opt in option:
      self._option_list.append(opt)

  def prepend_option(self, *option):
    """Insert one or more Options at the front of the group."""
    self._option_list = [*option, *self._option_list]

  def options(self):
    """Return the group's Options in order."""
    return self._option_list

  def name(self):
    """Return the group's display name."""
    return self._name

  @staticmethod
  def format_help(group, header=None):
    # Help formatting is delegated to optparse; nothing to do here.
    pass
class TwitterOptionParser(object):
  """
  Stateless builder-style wrapper around optparse.

  Mutating calls (usage, options, groups, values, interspersed_arguments)
  return a modified deep copy rather than changing the receiver, so
  partially configured parsers can be shared and extended safely.
  """

  class InvalidParameters(Exception): pass
  class InvalidArgument(Exception): pass

  # Attributes cloned by _copy().
  ATTRS = [ '_interspersed_arguments', '_usage', '_options', '_groups', '_values' ]

  def __init__(self):
    self._interspersed_arguments = False
    self._usage = ""
    self._options = []
    self._groups = []
    self._values = Values()

  def interspersed_arguments(self, i_a=None):
    """No argument: report the setting.  Otherwise return a copy with it set."""
    if i_a is None:
      return self._interspersed_arguments
    clone = self._copy()
    clone._interspersed_arguments = i_a
    return clone

  def usage(self, new_usage=None):
    """No argument: report the usage string.  Otherwise return a copy with it set."""
    if new_usage is None:
      return self._usage
    clone = self._copy()
    clone._usage = new_usage
    return clone

  def options(self, merge_options=None):
    """No argument: report the options.  Otherwise return a copy with them appended."""
    if merge_options is None:
      return self._options
    clone = self._copy()
    clone._options += merge_options
    return clone

  def groups(self, merge_groups=None):
    """No argument: report the groups.  Otherwise return a copy with them appended."""
    if merge_groups is None:
      return self._groups
    clone = self._copy()
    clone._groups += merge_groups
    return clone

  def values(self, merge_values=None):
    """No argument: report default/parsed values.  Otherwise return a copy
    with the non-sentinel entries of merge_values folded in."""
    if merge_values is None:
      return self._values
    clone = self._copy()
    TwitterOptionParser._merge_values(clone._values, merge_values)
    return clone

  @staticmethod
  def _merge_values(values1, values2):
    # Copy every attribute of values2 that carries a real (non-NO_DEFAULT) value.
    for attr, value in values2.__dict__.items():
      if value != NO_DEFAULT:
        setattr(values1, attr, value)

  def _copy(self):
    clone = TwitterOptionParser()
    for attr in TwitterOptionParser.ATTRS:
      setattr(clone, attr, copy.deepcopy(getattr(self, attr)))
    return clone

  def _init_parser(self):
    # Materialize a real OptionParser from the accumulated builder state.
    real_parser = OptionParser(add_help_option=False, usage=self.usage())
    real_parser.allow_interspersed_args = self.interspersed_arguments()
    for option in self.options():
      real_parser.add_option(copy.deepcopy(option))
    for grp in self.groups():
      real_group = real_parser.add_option_group(grp.name())
      for option in grp.options():
        real_group.add_option(copy.deepcopy(option))
    return real_parser

  # There is enough special-casing that we're doing to muck with the optparse
  # module that it might be worthwhile in writing our own, sigh.
  def parse(self, argv=None):
    """ Parse argv. If argv=None, use sys.argv[1:]. """
    real_parser = self._init_parser()
    inherited = copy.deepcopy(self.values())
    if isinstance(inherited, dict):
      inherited = Values(inherited)
    values, leftover = real_parser.parse_args(
        args=sys.argv[1:] if argv is None else argv)
    # Drop unset (None) options so they cannot mask inherited defaults.
    for attr in list(values.__dict__):
      if getattr(values, attr) is None:
        delattr(values, attr)
    TwitterOptionParser._merge_values(inherited, values)
    return inherited, leftover

  def print_help(self):
    self._init_parser().print_help()

  def error(self, message):
    self._init_parser().error(message)

  def __enter__(self):
    return self

  def __exit__(self, *args):
    return False
|
foursquare/commons-old
|
src/python/twitter/common/options/__init__.py
|
Python
|
apache-2.0
| 5,609
|
[
"Brian"
] |
26bc8d002c21c86d2d91ae0b5ca87aabfb9529db46a1465a42ab7a10d56ae51b
|
import warnings
warnings.simplefilter('always', DeprecationWarning)
import os
import functools
import os.path
import re
import urllib
import urllib2
import json
import imp
import random
import tabulate
from connection import H2OConnection
from job import H2OJob
from expr import ExprNode
from frame import H2OFrame, _py_tmp_key
from model import H2OBinomialModel,H2OAutoEncoderModel,H2OClusteringModel,H2OMultinomialModel,H2ORegressionModel
import h2o_model_builder
__PROGRESS_BAR__ = True # display & update progress bar while polling
def lazy_import(path):
  """
  Import a single file or collection of files.

  :param path: A path to a data file (remote or local).
  :return: A new H2OFrame
  """
  if isinstance(path, (list, tuple)):
    # One import per entry; keep only the first destination frame of each.
    return [_import(p)[0] for p in path]
  if os.path.isdir(path):
    # Directory imports return the full list of destination frames.
    return _import(path)
  return [_import(path)[0]]
def _import(path):
  # Ask the backend to import the path; surface any per-file failures loudly.
  response = H2OConnection.get_json(url_suffix="ImportFiles", path=path)
  if response['fails']:
    raise ValueError("ImportFiles of " + path + " failed on " + str(response['fails']))
  return response['destination_frames']
def upload_file(path, destination_frame=""):
  """
  Upload a dataset at the path given from the local machine to the H2O cluster.

  :param path: A path specifying the location of the data to upload.
  :param destination_frame: The name of the H2O Frame in the H2O Cluster.
  :return: A new H2OFrame
  """
  if destination_frame == "":
    destination_frame = _py_tmp_key()
  file_info = {"file": os.path.abspath(path)}
  H2OConnection.post_json(url_suffix="PostFile", file_upload_info=file_info,
                          destination_frame=destination_frame)
  return H2OFrame(raw_id=destination_frame)
def import_file(path=None):
  """
  Import a frame from a file (remote or local machine). If you run H2O on Hadoop, you can access to HDFS

  :param path: A path specifying the location of the data to import.
  :return: A new H2OFrame
  """
  # H2OFrame drives the ImportFiles/Parse round trip itself.
  return H2OFrame(file_path=path)
def parse_setup(raw_frames):
  """
  Ask the backend for a parse setup over previously imported frames.

  :param raw_frames: A collection of imported file frames
  :return: A ParseSetup "object" (the raw JSON response dict)
  """
  # The H2O backend only accepts things that are quoted
  if isinstance(raw_frames, unicode): raw_frames = [raw_frames]  # Python 2 text type
  j = H2OConnection.post_json(url_suffix="ParseSetup", source_frames=[_quoted(id) for id in raw_frames])
  return j
def parse(setup, h2o_name, first_line_is_header=(-1, 0, 1)):
  """
  Trigger a parse; blocking; removeFrame just keep the Vecs.

  :param setup: The result of calling parse_setup.
  :param h2o_name: The name of the H2O Frame on the back end.
  :param first_line_is_header: -1 means data, 0 means guess, 1 means header.
  :return: A new parsed object
  """
  # Parse parameters (None values provided by setup)
  p = { 'destination_frame' : h2o_name,
        'parse_type' : None,
        'separator' : None,
        'single_quotes' : None,
        'check_header' : None,
        'number_columns' : None,
        'chunk_size' : None,
        'delete_on_done' : True,
        'blocking' : False,
        }
  # The tuple default is a "not supplied" sentinel: fall back to the header
  # mode guessed by parse_setup.
  if isinstance(first_line_is_header, tuple):
    first_line_is_header = setup["check_header"]
  # Quote the setup values; adding the key to p (as None) lets the update()
  # below pull the quoted value out of setup.
  if setup["column_names"]:
    setup["column_names"] = [_quoted(name) for name in setup["column_names"]]
    p["column_names"] = None
  if setup["column_types"]:
    setup["column_types"] = [_quoted(name) for name in setup["column_types"]]
    p["column_types"] = None
  if setup["na_strings"]:
    setup["na_strings"] = [[_quoted(na) for na in col] if col is not None else [] for col in setup["na_strings"]]
    p["na_strings"] = None
  # update the parse parameters with the parse_setup values
  # NOTE(review): this copies setup["destination_frame"] over h2o_name when
  # present, so the caller-supplied name may be overridden — confirm intended.
  p.update({k: v for k, v in setup.iteritems() if k in p})  # Python 2 dict API
  p["check_header"] = first_line_is_header
  # Extract only 'name' from each src in the array of srcs
  p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]
  # Request blocking parse
  j = H2OJob(H2OConnection.post_json(url_suffix="Parse", **p), "Parse").poll()
  return j.jobs
def parse_raw(setup, id=None, first_line_is_header=(-1, 0, 1)):
  """
  Used in conjunction with lazy_import and parse_setup in order to make
  alterations before parsing.

  :param setup: Result of h2o.parse_setup
  :param id: An optional id for the frame; when None, the destination frame
             suggested by parse_setup is used.
  :param first_line_is_header: -1,0,1 if the first line is to be used as the header
  :return: An H2OFrame object
  """
  # BUG FIX: the id parameter was unconditionally overwritten by
  # setup["destination_frame"], i.e. silently ignored.  Honor a
  # caller-supplied id and keep setup consistent so the backend parses into
  # the same key the local handle records.
  if id is None:
    id = setup["destination_frame"]
  else:
    setup["destination_frame"] = id
  fr = H2OFrame()
  parsed = parse(setup, id, first_line_is_header)
  fr._nrows = parsed['rows']
  fr._col_names = parsed['column_names']
  fr._ncols = len(fr._col_names)
  fr._computed = True
  fr._id = id
  fr._keep = True
  return fr
def _quoted(key):
if key == None: return "\"\""
is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0
key = key if is_quoted else "\"" + key + "\""
return key
def assign(data,id):
  """Rebind the frame to the cluster key *id* and return the same handle.

  NOTE(review): "gput" presumably stores the frame under the new id and
  "removeframe" drops the old key — verify against the Rapids grammar.
  """
  rapids(ExprNode(",", ExprNode("gput", id, data), ExprNode("removeframe", data))._eager())
  # Keep the local handle pointing at the new backend key.
  data._id = id
  return data
def which(condition):
  """
  :param condition: A conditional statement.
  :return: A H2OFrame of 1 column filled with 0-based indices for which the condition is True
  """
  # NOTE(review): the trailing False presumably selects 0-based indexing in
  # the backend "h2o.which" call, matching the docstring — confirm.
  return H2OFrame(expr=ExprNode("h2o.which",condition,False))._frame()
def ifelse(test,yes,no):
  """
  Semantically equivalent to R's ifelse.
  Based on the booleans in the test vector, the output has the values of the yes and no
  vectors interleaved (or merged together).

  :param test: A "test" H2OFrame
  :param yes: A "yes" H2OFrame
  :param no: A "no" H2OFrame
  :return: An H2OFrame
  """
  # Evaluation happens lazily on the backend via the "ifelse" Rapids expression.
  return H2OFrame(expr=ExprNode("ifelse",test,yes,no))._frame()
def get_future_model(future_model):
  """
  Waits for the future model to finish building, and then returns the model.

  :param future_model: an H2OModelFuture object
  :return: a resolved model (i.e. an H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel, ...)
  """
  # Blocks until the backend job backing future_model completes.
  return h2o_model_builder._resolve_model(future_model)
def get_model(model_id):
  """
  Return the specified model, wrapped in its category-specific class.

  :param model_id: The model identification in h2o
  """
  model_json = H2OConnection.get_json("Models/"+model_id)["models"][0]
  model_type = model_json["output"]["model_category"]
  # Dispatch table mapping backend model categories to wrapper classes.
  wrappers = {
      "Binomial": H2OBinomialModel,
      "Clustering": H2OClusteringModel,
      "Regression": H2ORegressionModel,
      "Multinomial": H2OMultinomialModel,
      "AutoEncoder": H2OAutoEncoderModel,
  }
  wrapper = wrappers.get(model_type)
  if wrapper is None:
    raise NotImplementedError(model_type)
  return wrapper(model_id, model_json)
def get_frame(frame_id):
  """
  Obtain a handle to the frame in H2O with the frame_id key.

  :param frame_id: The key of the frame on the H2O backend.
  :return: An H2OFrame
  """
  return H2OFrame.get_frame(frame_id)
"""
Here are some testing utilities for running the pyunit tests in conjunction with run.py.
run.py issues an ip and port as a string: "<ip>:<port>".
The expected value of sys_args[1] is "<ip>:<port>"
"""
"""
All tests MUST have the following structure:
import sys
sys.path.insert(1, "..") # may vary depending on this test's position relative to h2o-py
import h2o
def my_test(ip=None, port=None):
...test filling...
if __name__ == "__main__":
h2o.run_test(sys.argv, my_test)
So each test must have an ip and port
"""
# TODO/FIXME: need to create an internal testing framework for python ... internal IP addresses should NOT be published as part of package!
# HDFS helpers
def get_h2o_internal_hdfs_name_node():
  """Return the hard-coded IP of H2O's internal-testing HDFS name node.

  Internal-network address only; not reachable outside the H2O test
  environment.
  """
  return "172.16.2.176"
def is_running_internal_to_h2o():
  """Return True when the internal H2O HDFS name node web UI is reachable.

  Used to skip HDFS-dependent tests outside H2O's internal network.
  """
  url = "http://{0}:50070".format(get_h2o_internal_hdfs_name_node())
  try:
    urllib2.urlopen(urllib2.Request(url))
  except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; any connection failure still means "not internal".
    return False
  return True
def _check_metric(val1, val2, metric_name, op):
  """
  Assert that `val1 op val2` holds for the named metric; no-op for unknown ops.

  :param val1: metric value of the first model
  :param val2: metric value of the second model
  :param metric_name: metric label used in the failure message (e.g. "F1", "MSE")
  :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
  :return: None. Raises AssertionError with a descriptive message on failure.
  """
  comparisons = {'e':  (lambda a, b: a == b, "== to"),
                 'g':  (lambda a, b: a > b,  "> than"),
                 'ge': (lambda a, b: a >= b, ">= than")}
  if op not in comparisons: return
  passed, phrase = comparisons[op]
  assert passed(val1, val2), "The first model has an {2} of {0} and the second model has an {2} of {1}. " \
                             "Expected the first to be {3} the second.".format(val1, val2, metric_name, phrase)

def check_models(model1, model2, use_cross_validation=False, op='e'):
  """
  Check that the given models are equivalent.

  :param model1:
  :param model2:
  :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
  training metrics.
  :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
  :return: None. Throw meaningful error messages if the check fails
  """
  # 1. Check model types
  model1_type = type(model1)
  model2_type = type(model2)
  assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
                                     "models is of type {1}.".format(model1_type, model2_type)
  # 2. Check model metrics. The original repeated the e/g/ge comparison block
  # verbatim for every metric; the shared logic now lives in _check_metric.
  if isinstance(model1, H2OBinomialModel):        # 2a. Binomial: compare F1
    _check_metric(model1.F1(xval=use_cross_validation)[0][1],
                  model2.F1(xval=use_cross_validation)[0][1], "F1", op)
  elif isinstance(model1, H2ORegressionModel):    # 2b. Regression: compare MSE
    _check_metric(model1.mse(xval=use_cross_validation),
                  model2.mse(xval=use_cross_validation), "MSE", op)
  elif isinstance(model1, H2OMultinomialModel):   # 2c. Multinomial: hit-ratio (not implemented)
    pass
  elif isinstance(model1, H2OClusteringModel):    # 2d. Clustering: compare TOTSS
    _check_metric(model1.totss(xval=use_cross_validation),
                  model2.totss(xval=use_cross_validation), "TOTSS", op)
def check_dims_values(python_obj, h2o_frame, rows, cols):
  """
  Verify that a python object and an H2OFrame agree both in shape and element-wise.
  Assumes the python object conforms to the rules in the H2O frame documentation.

  :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, or pandas.DataFrame
  :param h2o_frame: an H2OFrame
  :param rows: number of rows
  :param cols: number of columns
  :return: None (raises AssertionError on mismatch)
  """
  h2o_rows, h2o_cols = h2o_frame.dim()
  dims_match = (h2o_rows == rows) and (h2o_cols == cols)
  assert dims_match, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
                     "".format(h2o_rows, rows, h2o_cols, cols)
  if isinstance(python_obj, (list, tuple)):
    # sequences: a single row may be given as a flat (non-nested) sequence
    for row_idx in range(rows):
      for col_idx in range(cols):
        py_val = python_obj[row_idx][col_idx] if rows > 1 else python_obj[col_idx]
        h2o_val = h2o_frame[row_idx, col_idx]
        assert py_val == h2o_val, "expected H2OFrame to have the same values as the python object for row {0} and column " \
                                  "{1}, but h2o got {2} and python got {3}.".format(row_idx, col_idx, h2o_val, py_val)
  elif isinstance(python_obj, dict):
    # dicts: keys are column names; a scalar value stands for a constant column
    for row_idx in range(rows):
      for col_key in python_obj.keys():
        column = python_obj[col_key]
        py_val = column[row_idx] if hasattr(column, '__iter__') else column
        h2o_val = h2o_frame[row_idx, col_key]
        assert py_val == h2o_val, "expected H2OFrame to have the same values as the python object for row {0} and column " \
                                  "{1}, but h2o got {2} and python got {3}.".format(row_idx, col_key, h2o_val, py_val)
def np_comparison_check(h2o_data, np_data, num_elements):
  """
  Spot-check randomly sampled elements of H2O data against a numpy array.

  :param h2o_data: an H2OFrame or H2OVec
  :param np_data: a numpy array
  :param num_elements: number of elements to compare
  :return: None (raises AssertionError on mismatch)
  """
  # Fail loudly if numpy is not installed rather than crashing on the import below.
  try:
    imp.find_module('numpy')
  except ImportError:
    assert False, "failed comparison check because unable to import numpy"
  import numpy as np
  rows, cols = h2o_data.dim()
  for _ in range(num_elements):
    row = random.randint(0, rows - 1)
    col = random.randint(0, cols - 1)
    h2o_val = h2o_data[row, col] if isinstance(h2o_data, H2OFrame) else h2o_data[row]
    np_val = np_data[row, col] if len(np_data.shape) > 1 else np_data[row]
    if isinstance(np_val, np.bool_): np_val = bool(np_val)  # numpy haz special bool type :(
    assert np.absolute(h2o_val - np_val) < 1e-6, \
      "failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
def run_test(sys_args, test_to_run):
  """
  Connect to a running H2O instance and execute a single pyunit test against it,
  cleaning up all cluster keys afterwards.

  :param sys_args: sys.argv as forwarded by the test; an "<ip>:<port>" string is
                   expected at index 2 (see NOTE below), optionally followed by
                   "--ipynb <notebook-path>".
  :param test_to_run: callable taking (ip, port) that performs the actual test.
  :return: None
  """
  # import pkg_resources
  # ver = pkg_resources.get_distribution("h2o").version
  # print "H2O PYTHON PACKAGE VERSION: " + str(ver)
  # NOTE(review): the module-level docs above say sys_args[1] holds "<ip>:<port>",
  # but index 2 is read here — confirm against run.py's argument layout.
  ip, port = sys_args[2].split(":")
  init(ip,port,strict_version_check=False)
  # Bracket the test in the server-side log so its output is easy to locate.
  log_and_echo("------------------------------------------------------------")
  log_and_echo("")
  log_and_echo("STARTING TEST: "+str(ou()))
  log_and_echo("")
  log_and_echo("------------------------------------------------------------")
  # Snapshot the key count so leaked keys can be reported after cleanup.
  num_keys = store_size()
  try:
    if len(sys_args) > 3 and sys_args[3] == "--ipynb": ipy_notebook_exec(sys_args[4],save_and_norun=False)
    else: test_to_run(ip, port)
  finally:
    # Always wipe cluster state, even when the test raised.
    remove_all()
    if keys_leaked(num_keys): print "Leaked Keys!"
def ou():
  """
  Return the source file name two frames up the call stack (i.e. the file of the
  caller's caller) — used to label a test in the H2O server log.

  :return: the file name recorded in that stack frame
  """
  import inspect
  return inspect.stack()[2][1]
def no_progress():
  """
  Disable progress-bar flushing to stdout; the finished bar is still printed when
  a job completes, which demarcates sections in a log file.

  :return: None
  """
  global __PROGRESS_BAR__
  __PROGRESS_BAR__ = False
def do_progress():
  """
  Re-enable the progress bar (it is on by default).

  :return: None
  """
  global __PROGRESS_BAR__
  __PROGRESS_BAR__ = True
def log_and_echo(message):
  """
  Write a marker message into the server-side H2O log.

  Handy when several pieces of work run back-to-back on one H2O cluster and you
  want a notation in the server log where one piece ends and the next begins.
  Generally used for debugging purposes.

  :param message: A character string with the message to write to the log.
  :return: None
  """
  H2OConnection.post_json("LogAndEcho", message="" if message is None else message)
def ipy_notebook_exec(path,save_and_norun=False):
  """
  Load an IPython notebook, concatenate its code cells (dropping any line that
  mentions "h2o.init" so the test harness controls the connection), then either
  save the result as a .py file or execute it in a fresh namespace.

  :param path: path to the .ipynb file
  :param save_and_norun: if True, write the extracted program next to the
         notebook (same base name, .py extension) instead of executing it
  :return: None
  """
  notebook = json.load(open(path))
  program = ''
  for block in ipy_blocks(notebook):
    for line in ipy_lines(block):
      if "h2o.init" not in line:
        # cells may store lines with or without trailing newlines; normalize
        program += line if '\n' in line else line + '\n'
  if save_and_norun:
    with open(os.path.basename(path).split('ipynb')[0]+'py',"w") as f:
      f.write(program)
  else:
    d={}
    exec program in d # safe, but horrible (exec is horrible)
def ipy_blocks(notebook):
  """
  Return the list of cells from a parsed IPython notebook, handling both the old
  (v3, 'worksheets') and new (v4, 'cells') JSON layouts.

  :param notebook: the notebook JSON, already parsed into a dict
  :return: a list of cell dicts
  :raises NotImplementedError: if neither layout marker is present
  """
  if 'worksheets' in notebook:
    return notebook['worksheets'][0]['cells'] # just take the first worksheet
  elif 'cells' in notebook:
    return notebook['cells']
  else:
    # was Python-2-only `raise E, "msg"` syntax; the call form works in Py2 and Py3
    raise NotImplementedError("ipython notebook cell/block json format not handled")
def ipy_lines(block):
  """
  Return the code lines of a notebook cell, handling both the new ('source') and
  old ('input') cell JSON layouts.

  :param block: a single cell dict from the notebook
  :return: the list of source lines
  :raises NotImplementedError: if neither layout marker is present
  """
  if 'source' in block:
    return block['source']
  elif 'input' in block:
    return block['input']
  else:
    # was Python-2-only `raise E, "msg"` syntax; the call form works in Py2 and Py3
    raise NotImplementedError("ipython notebook source/line json format not handled")
def remove(object):
  """
  Hard-delete an object from H2O, removing all of its subparts.

  :param object: the H2OFrame, or key string, to be removed. (The parameter name
                 shadows the builtin `object`; kept for API compatibility.)
  :return: None
  :raises ValueError: if called with None
  """
  if object is None:
    raise ValueError("remove with no object is not supported, for your protection")
  if isinstance(object, H2OFrame):
    H2OConnection.delete("DKV/"+object._id)
  if isinstance(object, str):
    H2OConnection.delete("DKV/"+object)
def remove_all():
  """
  Delete every object stored on the H2O cluster.

  :return: None
  """
  H2OConnection.delete("DKV")
def removeFrameShallow(key):
  """
  Shallow ("soft") DKV remove of a frame: only the top-level pointer is deleted;
  the internal Vecs and all big data remain on the cluster.

  :param key: A Frame Key to be removed
  :return: None
  """
  rapids("(removeframe '"+key+"')")
  return None
def rapids(expr, id=None):
  """
  Execute a Rapids expression on the H2O cluster.

  :param expr: the rapids expression (ascii string), or a list to be collapsed first
  :param id: optional key to assign the result to (shadows the builtin; kept for API compatibility)
  :return: The JSON response of the Rapids execution
  :raises EnvironmentError: if the server reports an evaluation error
  """
  if isinstance(expr, list):
    expr = ExprNode._collapse_sb(expr)
  if id is not None:
    expr = "(= !{} {})".format(id,expr)
  result = H2OConnection.post_json("Rapids", ast=urllib.quote(expr), _rest_version=99)
  if result['error'] is not None:
    raise EnvironmentError("rapids expression not evaluated: {0}".format(str(result['error'])))
  return result
def ls():
  """
  List the keys present on the connected H2O cluster.

  :return: the keys, as a client-side data frame
  """
  return H2OFrame(expr=ExprNode("ls"))._frame().as_data_frame()
def frame(frame_id, exclude=""):
  """
  Retrieve metadata for an id that points to a Frame.

  :param frame_id: A pointer to a Frame in H2O.
  :param exclude: appended to frame_id before URL-quoting.
                  NOTE(review): the whole "frame_id+exclude" string is quoted as
                  one unit — confirm callers rely on this rather than a query suffix.
  :return: Meta information on the frame
  """
  return H2OConnection.get_json("Frames/" + urllib.quote(frame_id+exclude))
def frames():
  """
  Retrieve metadata for every Frame on the cluster.

  :return: Meta information on the frames
  """
  return H2OConnection.get_json("Frames")
def download_pojo(model,path="", get_jar=True):
  """
  Download the POJO for this model to the directory specified by path (no trailing
  slash!). If path is "", dump the POJO source to the screen instead.

  :param model: Retrieve this model's scoring POJO.
  :param path: An absolute path to the directory where POJO should be saved.
  :param get_jar: Retrieve the h2o-genmodel jar as well (only when path != "").
  :return: None
  """
  java = H2OConnection.get( "Models.java/"+model._id )
  file_path = path + "/" + model._id + ".java"
  if path == "": print java.text
  else:
    with open(file_path, 'w') as f:
      f.write(java.text)
  # The jar is only fetched when writing to disk; with path=="" there is nowhere to put it.
  if get_jar and path!="":
    url = H2OConnection.make_url("h2o-genmodel.jar")
    filename = path + "/" + "h2o-genmodel.jar"
    response = urllib2.urlopen(url)
    with open(filename, "w") as f:
      f.write(response.read())
def download_csv(data, filename):
  """
  Download an H2O data set to a CSV file on the local disk.

  Warning: Files located on the H2O server may be very large! Make
  sure you have enough hard drive space to accommodate the entire file.

  :param data: an H2OFrame object to be downloaded.
  :param filename: A string indicating the name that the CSV file should be saved to.
  :return: None
  :raises ValueError: if `data` is not an H2OFrame
  """
  # Validate before touching the object: the original called data._eager() first,
  # so a non-H2OFrame argument died with AttributeError and never reached the guard.
  # The original guard itself was broken: `raise(ValueError, msg)` raises a tuple,
  # and `"..." + type(data)` is a str+type TypeError; both are fixed here.
  if not isinstance(data, H2OFrame):
    raise ValueError("`data` argument must be an H2OFrame, but got " + str(type(data)))
  data._eager()
  url = "http://{}:{}/3/DownloadDataset?frame_id={}".format(H2OConnection.ip(),H2OConnection.port(),data._id)
  with open(filename, 'w') as f:
    f.write(urllib2.urlopen(url).read())
def download_all_logs(dirname=".",filename=None):
  """
  Download the H2O log files to disk.

  :param dirname: (Optional) directory the log file should be saved in; created if missing.
  :param filename: (Optional) name to save the file under; defaults to the name the
                   server suggests via its "filename=" response header.
  :return: path of logs written (as a string)
  """
  url = 'http://' + H2OConnection.ip() + ':' + str(H2OConnection.port()) + '/Logs/download'
  # Fetch once and reuse the body below: the original opened the URL a second time
  # inside the `with`, downloading the (potentially large) logs twice.
  response = urllib2.urlopen(url)
  if not os.path.exists(dirname): os.mkdir(dirname)
  if filename is None:
    # the server suggests a name via a "filename=" entry in the response headers
    for h in response.headers.headers:
      if 'filename=' in h:
        filename = h.split("filename=")[1].strip()
        break
  path = os.path.join(dirname,filename)
  # `with` closes the file; the original also called f.close() redundantly inside it
  with open(path, 'w') as f:
    f.write(response.read())
  print("Writing H2O logs to " + path)
  return path
def save_model(model, dir="", name="", filename="", force=False):
  """
  Save an H2O Model object to disk.

  If the destination file already exists, force=True overwrites it; otherwise the
  operation fails.

  :param model: the model to persist
  :param dir: string indicating the directory the model will be written to
              (shadows the builtin; kept for API compatibility).
  :param name: string name of the file; defaults to the model's id.
  :param filename: full path to the file; overrides dir/name when non-empty.
  :param force: logical, indicates how to deal with files that already exist
  :return: the path of the model (string)
  :raises ValueError: on any mis-typed argument
  """
  # Validate all arguments up front (same precedence order as before).
  if not isinstance(dir, str): raise ValueError("`dir` must be a character string")
  if not isinstance(name, str): raise ValueError("`name` must be a character string")
  if not isinstance(filename, str): raise ValueError("`filename` must be a character string")
  if not isinstance(force, bool): raise ValueError("`force` must be True or False")
  if dir == "": dir = os.getcwd()
  if name == "": name = model._model_json['model_id']['name']
  path = filename if filename != "" else os.path.join(dir, name)
  H2OConnection.get("Models.bin/" + model._model_json['model_id']['name'],
                    dir=path, force=int(force), _rest_version=99)
  return path
def load_model(path):
  """
  Load a previously saved H2O model from disk.

  :param path: The full path of the H2O Model to be imported. For example, if the
      `dir` argument in h2o.saveModel was "/Users/UserName/Desktop", then `path`
      should look like "/Users/UserName/Desktop/K-meansModel__a7cebf318ca5827185e209edf47c4052".
  :return: the model
  :raises ValueError: if `path` is not a string
  """
  if not isinstance(path, str): raise ValueError("`path` must be a non-empty character string")
  res = H2OConnection.post("Models.bin/", dir=path, _rest_version=99)
  return get_model(res.json()['models'][0]['model_id']['name'])
def cluster_status():
  """
  Print status information about the H2O cloud to stdout.

  TODO: This isn't really a cluster status... it's a node status check for the node
  we're connected to. This is possibly confusing because this can come back without
  warning, but if a user tries to do any remoteSend, they will get a "cloud sick warning".

  :return: None
  """
  cluster_json = H2OConnection.get_json("Cloud?skip_ticks=true")
  print "Version: {0}".format(cluster_json['version'])
  print "Cloud name: {0}".format(cluster_json['cloud_name'])
  print "Cloud size: {0}".format(cluster_json['cloud_size'])
  if cluster_json['locked']: print "Cloud is locked\n"
  else: print "Accepting new members\n"
  if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0:
    print "No nodes found"
    return
  # For each node, print only the whitelisted health/resource fields.
  status = []
  for node in cluster_json['nodes']:
    for k, v in zip(node.keys(),node.values()):
      if k in ["h2o", "healthy", "last_ping", "num_cpus", "sys_load", "mem_value_size", "total_value_size",
               "free_mem", "tot_mem", "max_mem", "free_disk", "max_disk", "pid", "num_keys", "tcps_active",
               "open_fds", "rpcs_active"]: status.append(k+": {0}".format(v))
    print ', '.join(status)
    print
def init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False,
         license=None, max_mem_size_GB=None, min_mem_size_GB=None, ice_root=None, strict_version_check=False):
  """
  Open an H2O connection to the given ip and port.

  :param ip: A string representing the hostname or IP address of the server where H2O is running.
  :param port: A port, default is 54321
  :param size: The expected number of h2o instances (ignored if start_h2o is True)
  :param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails.
  :param enable_assertions: If start_h2o, pass `-ea` as a VM option.
  :param license: If not None, is a path to a license file.
  :param max_mem_size_GB: Maximum heap size (jvm option Xmx) in gigabytes.
  :param min_mem_size_GB: Minimum heap size (jvm option Xms) in gigabytes.
  :param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
  :param strict_version_check: if True, fail when client and server versions differ.
  :return: None
  """
  # Constructing the connection object registers it as the current connection.
  H2OConnection(ip=ip, port=port, start_h2o=start_h2o, enable_assertions=enable_assertions,
                license=license, max_mem_size_GB=max_mem_size_GB, min_mem_size_GB=min_mem_size_GB,
                ice_root=ice_root, strict_version_check=strict_version_check)
  return None
def export_file(frame, path, force=False):
  """
  Export an H2OFrame to a path on the machine this python session is connected to.
  To view the current session, call h2o.cluster_info().

  :param frame: The Frame to save to disk.
  :param path: The path to the save point on disk.
  :param force: Overwrite any preexisting file with the same path
  :return: None
  """
  frame._eager()
  overwrite = "true" if force else "false"
  H2OJob(H2OConnection.get_json("Frames/"+frame._id+"/export/"+path+"/overwrite/"+overwrite), "Export File").poll()
def cluster_info():
  """
  Print the current H2O cluster information to stdout.

  :return: None
  """
  H2OConnection._cluster_info()
def shutdown(conn=None, prompt=True):
  """
  Shut down the specified H2O instance. All data on the cluster will be lost.

  Checks whether H2O is running at the given IP address and port, and if so shuts
  that instance down.

  :param conn: An H2OConnection object containing the IP address and port of the
               server running H2O; defaults to the current connection.
  :param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
  :return: None
  """
  if conn is None:
    conn = H2OConnection.current_connection()
  H2OConnection._shutdown(conn=conn, prompt=prompt)
def deeplearning(x,y=None,validation_x=None,validation_y=None,training_frame=None,model_id=None,
                 overwrite_with_best_model=None,validation_frame=None,checkpoint=None,autoencoder=None,
                 use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
                 seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
                 momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
                 input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
                 initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
                 score_validation_samples=None,score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
                 max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
                 max_after_balance_size=None,score_validation_sampling=None,diagnostics=None,variable_importances=None,
                 fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
                 shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
                 max_categorical_features=None,reproducible=None,export_weights_and_biases=None,offset_column=None,weights_column=None,
                 nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None):
  """
  Build a supervised Deep Learning model
  Performs Deep Learning neural networks on an H2OFrame

  :param x: An H2OFrame containing the predictors in the model.
  :param y: An H2OFrame of the response variable in the model.
  :param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param overwrite_with_best_model: Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
  :param validation_frame: (Optional) An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0
  :param checkpoint: "Model checkpoint (either key or H2ODeepLearningModel) to resume training with."
  :param autoencoder: Enable auto-encoder for model building.
  :param use_all_factor_levels: Logical. Use all factor levels of categorical variance. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.
  :param activation: A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
  :param hidden: Hidden layer sizes (e.g. c(100,100))
  :param epochs: How many times the dataset should be iterated (streamed), can be fractional
  :param train_samples_per_iteration: Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default)
  :param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
  :param adaptive_rate: Logical. Adaptive learning rate (ADAELTA)
  :param rho: Adaptive learning rate time decay factor (similarity to prior updates)
  :param epsilon: Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
  :param rate: Learning rate (higher => less stable, lower => slower convergence)
  :param rate_annealing: Learning rate annealing: \eqn{(rate)/(1 + rate_annealing*samples)
  :param rate_decay: Learning rate decay factor between layers (N-th layer: \eqn{rate*\alpha^(N-1))
  :param momentum_start: Initial momentum at the beginning of training (try 0.5)
  :param momentum_ramp: Number of training samples for which momentum increases
  :param momentum_stable: Final momentum after the amp is over (try 0.99)
  :param nesterov_accelerated_gradient: Logical. Use Nesterov accelerated gradient (recommended)
  :param input_dropout_ratio: A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
  :param hidden_dropout_ratios: Input layer dropout ratio (can improve generalization) specify one value per hidden layer, defaults to 0.5
  :param l1: L1 regularization (can add stability and improve generalization, causes many weights to become 0)
  :param l2: L2 regularization (can add stability and improve generalization, causes many weights to be small)
  :param max_w2: Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
  :param initial_weight_distribution: Can be "Uniform", "UniformAdaptive", or "Normal"
  :param initial_weight_scale: Uniform: -value ... value, Normal: stddev
  :param loss: Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
  :param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian"
  :param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
  :param score_interval: Shortest time interval (in secs) between model scoring
  :param score_training_samples: Number of training set samples for scoring (0 for all)
  :param score_validation_samples: Number of validation set samples for scoring (0 for all)
  :param score_duty_cycle: Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
  :param classification_stop: Stopping criterion for classification error fraction on training data (-1 to disable)
  :param regression_stop: Stopping criterion for regression error (MSE) on training data (-1 to disable)
  :param quiet_mode: Enable quiet mode for less output to standard output
  :param max_confusion_matrix_size: Max. size (number of classes) for confusion matrices to be shown
  :param max_hit_ratio_k: Max number (top K) of predictions to use for hit ratio computation(for multi-class only, 0 to disable)
  :param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data)
  :param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.
  :param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
  :param score_validation_sampling: Method used to sample validation dataset for scoring
  :param diagnostics: Enable diagnostics for hidden layers
  :param variable_importances: Compute variable importances for input features (Gedeon method) - can be slow for large networks)
  :param fast_mode: Enable fast mode (minor approximations in back-propagation)
  :param ignore_const_cols: Ignore constant columns (no information can be gained anyway)
  :param force_load_balance: Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
  :param replicate_training_data: Replicate the entire training dataset onto every node for faster training
  :param single_node_mode: Run on a single node for fine-tuning of model parameters
  :param shuffle_training_data: Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to \eqn{numRows*numNodes
  :param sparse: Sparse data handling (Experimental)
  :param col_major: Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
  :param average_activation: Average activation for sparse auto-encoder (Experimental)
  :param sparsity_beta: Sparsity regularization (Experimental)
  :param max_categorical_features: Max. number of categorical features, enforced via hashing Experimental)
  :param reproducible: Force reproducibility on small data (will be slow - only uses 1 thread)
  :param export_weights_and_biases: Whether to export Neural Network weights and biases to H2O Frames"
  :param offset_column: Specify the offset column.
  :param weights_column: Specify the weights column.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
  :return: Return a new classifier or regression model.
  """
  # locals() here holds exactly the call arguments (this is the first statement).
  # The whitelisted keys are always forwarded, even when None; every other
  # argument is forwarded only when the caller actually set it.
  parms = {k:v for k,v in locals().items() if k in ["y","training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="deeplearning"
  return h2o_model_builder.supervised(parms)
def autoencoder(x,training_frame=None,model_id=None,overwrite_with_best_model=None,checkpoint=None,
                use_all_factor_levels=None,activation=None,hidden=None,epochs=None,train_samples_per_iteration=None,
                seed=None,adaptive_rate=None,rho=None,epsilon=None,rate=None,rate_annealing=None,rate_decay=None,
                momentum_start=None,momentum_ramp=None,momentum_stable=None,nesterov_accelerated_gradient=None,
                input_dropout_ratio=None,hidden_dropout_ratios=None,l1=None,l2=None,max_w2=None,initial_weight_distribution=None,
                initial_weight_scale=None,loss=None,distribution=None,tweedie_power=None,score_interval=None,score_training_samples=None,
                score_duty_cycle=None,classification_stop=None,regression_stop=None,quiet_mode=None,
                max_confusion_matrix_size=None,max_hit_ratio_k=None,balance_classes=None,class_sampling_factors=None,
                max_after_balance_size=None,diagnostics=None,variable_importances=None,
                fast_mode=None,ignore_const_cols=None,force_load_balance=None,replicate_training_data=None,single_node_mode=None,
                shuffle_training_data=None,sparse=None,col_major=None,average_activation=None,sparsity_beta=None,
                max_categorical_features=None,reproducible=None,export_weights_and_biases=None):
  """
  Build unsupervised auto encoder using H2O Deeplearning

  :param x: An H2OFrame containing the predictors in the model.
  :param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param overwrite_with_best_model: Logical. If True, overwrite the final model with the best model found during training. Defaults to True.
  :param checkpoint: "Model checkpoint (either key or H2ODeepLearningModel) to resume training with."
  :param use_all_factor_levels: Logical. Use all factor levels of categorical variance. Otherwise the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.
  :param activation: A string indicating the activation function to use. Must be either "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout", "Maxout", or "MaxoutWithDropout"
  :param hidden: Hidden layer sizes (e.g. c(100,100))
  :param epochs: How many times the dataset should be iterated (streamed), can be fractional
  :param train_samples_per_iteration: Number of training samples (globally) per MapReduce iteration. Special values are: 0 one epoch; -1 all available data (e.g., replicated training data); or -2 auto-tuning (default)
  :param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
  :param adaptive_rate: Logical. Adaptive learning rate (ADAELTA)
  :param rho: Adaptive learning rate time decay factor (similarity to prior updates)
  :param epsilon: Adaptive learning rate parameter, similar to learn rate annealing during initial training phase. Typical values are between 1.0e-10 and 1.0e-4
  :param rate: Learning rate (higher => less stable, lower => slower convergence)
  :param rate_annealing: Learning rate annealing: \eqn{(rate)/(1 + rate_annealing*samples)
  :param rate_decay: Learning rate decay factor between layers (N-th layer: \eqn{rate*\alpha^(N-1))
  :param momentum_start: Initial momentum at the beginning of training (try 0.5)
  :param momentum_ramp: Number of training samples for which momentum increases
  :param momentum_stable: Final momentum after the amp is over (try 0.99)
  :param nesterov_accelerated_gradient: Logical. Use Nesterov accelerated gradient (recommended)
  :param input_dropout_ratio: A fraction of the features for each training row to be omitted from training in order to improve generalization (dimension sampling).
  :param hidden_dropout_ratios: Input layer dropout ratio (can improve generalization) specify one value per hidden layer, defaults to 0.5
  :param l1: L1 regularization (can add stability and improve generalization, causes many weights to become 0)
  :param l2: L2 regularization (can add stability and improve generalization, causes many weights to be small)
  :param max_w2: Constraint for squared sum of incoming weights per unit (e.g. Rectifier)
  :param initial_weight_distribution: Can be "Uniform", "UniformAdaptive", or "Normal"
  :param initial_weight_scale: Uniform: -value ... value, Normal: stddev
  :param loss: Loss function: "Automatic", "CrossEntropy" (for classification only), "MeanSquare", "Absolute" (experimental) or "Huber" (experimental)
  :param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie", "laplace", "huber" or "gaussian"
  :param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
  :param score_interval: Shortest time interval (in secs) between model scoring
  :param score_training_samples: Number of training set samples for scoring (0 for all)
  :param score_duty_cycle: Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring)
  :param classification_stop: Stopping criterion for classification error fraction on training data (-1 to disable)
  :param regression_stop: Stopping criterion for regression error (MSE) on training data (-1 to disable)
  :param quiet_mode: Enable quiet mode for less output to standard output
  :param max_confusion_matrix_size: Max. size (number of classes) for confusion matrices to be shown
  :param max_hit_ratio_k: Max number (top K) of predictions to use for hit ratio computation(for multi-class only, 0 to disable)
  :param balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data)
  :param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes.
  :param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
  :param diagnostics: Enable diagnostics for hidden layers
  :param variable_importances: Compute variable importances for input features (Gedeon method) - can be slow for large networks)
  :param fast_mode: Enable fast mode (minor approximations in back-propagation)
  :param ignore_const_cols: Ignore constant columns (no information can be gained anyway)
  :param force_load_balance: Force extra load balancing to increase training speed for small datasets (to keep all cores busy)
  :param replicate_training_data: Replicate the entire training dataset onto every node for faster training
  :param single_node_mode: Run on a single node for fine-tuning of model parameters
  :param shuffle_training_data: Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to \eqn{numRows*numNodes
  :param sparse: Sparse data handling (Experimental)
  :param col_major: Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental)
  :param average_activation: Average activation for sparse auto-encoder (Experimental)
  :param sparsity_beta: Sparsity regularization (Experimental)
  :param max_categorical_features: Max. number of categorical features, enforced via hashing Experimental)
  :param reproducible: Force reproducibility on small data (will be slow - only uses 1 thread)
  :param export_weights_and_biases: Whether to export Neural Network weights and biases to H2O Frames"
  :return: Return a new autoencoder
  """
  # locals() here holds exactly the call arguments (this is the first statement).
  # NOTE(review): the whitelist mentions keys ("validation_frame", "fold_column", ...)
  # that are not parameters of this function — harmless, since locals() only
  # contains actual parameters, but confirm it wasn't meant to differ.
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="deeplearning"
  parms["autoencoder"]=True
  return h2o_model_builder.unsupervised(parms)
def gbm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,
        distribution=None,tweedie_power=None,ntrees=None,max_depth=None,min_rows=None,
        learn_rate=None,nbins=None,nbins_cats=None,validation_frame=None,
        balance_classes=None,max_after_balance_size=None,seed=None,build_tree_one_node=None,
        nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
        score_each_iteration=None,offset_column=None,weights_column=None,do_future=None,checkpoint=None):
  """
  Builds gradient boosted classification trees, and gradient boosted regression trees on a parsed data set.
  The default distribution function will guess the model type based on the response column type. To run properly, the
  response column must be numeric for "gaussian" or an enum for "bernoulli" or "multinomial".
  :param x: An H2OFrame containing the predictors in the model.
  :param y: An H2OFrame of the response variable in the model.
  :param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param distribution: A character string. The distribution function of the response. Must be "AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie" or "gaussian"
  :param tweedie_power: Tweedie power (only for Tweedie distribution, must be between 1 and 2)
  :param ntrees: A non-negative integer that determines the number of trees to grow.
  :param max_depth: Maximum depth to grow the tree.
  :param min_rows: Minimum number of rows to assign to terminal nodes.
  :param learn_rate: A number from 0.0 to 1.0
  :param nbins: For numerical columns (real/int), build a histogram of this many bins, then split at the best point
  :param nbins_cats: For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
  :param validation_frame: An H2OFrame object indicating the validation dataset used to construct the confusion matrix. If left blank, this defaults to the training data when nfolds = 0
  :param balance_classes: logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
  :param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
  :param seed: Seed for random numbers (affects sampling when balance_classes=T)
  :param build_tree_one_node: Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
  :param score_each_iteration: Attempts to score each tree.
  :param offset_column: Specify the offset column.
  :param weights_column: Specify the weights column.
  :return: A new classifier or regression model.
  """
  # Capture the caller's arguments via locals(): frame/column arguments are always
  # forwarded (the builder expects those keys even when None); every other
  # parameter is sent only when the caller explicitly set it. NOTE: because the
  # whole local namespace is captured, no local variable may be introduced above
  # this line.
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="gbm"
  return h2o_model_builder.supervised(parms)
def glm(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,validation_frame=None,
        max_iterations=None,beta_epsilon=None,solver=None,standardize=None,family=None,link=None,
        tweedie_variance_power=None,tweedie_link_power=None,alpha=None,prior=None,lambda_search=None,
        nlambdas=None,lambda_min_ratio=None,beta_constraints=None,offset_column=None,weights_column=None,
        nfolds=None,fold_column=None,fold_assignment=None,keep_cross_validation_predictions=None,
        intercept=None, Lambda=None, do_future=None, checkpoint=None):
  """
  Build a Generalized Linear Model
  Fit a generalized linear model, specified by a response variable, a set of predictors, and a description of the error
  distribution.
  :param x: An H2OFrame containing the predictors in the model.
  :param y: An H2OFrame of the response variable in the model.
  :param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param validation_frame: An H2OFrame object containing the variables in the model.
  :param max_iterations: A non-negative integer specifying the maximum number of iterations.
  :param beta_epsilon: A non-negative number specifying the magnitude of the maximum difference between the coefficient estimates from successive iterations. Defines the convergence criterion for h2o.glm.
  :param solver: A character string specifying the solver used: IRLSM (supports more features), L_BFGS (scales better for datasets with many columns)
  :param standardize: A logical value indicating whether the numeric predictors should be standardized to have a mean of 0 and a variance of 1 prior to training the models.
  :param family: A character string specifying the distribution of the model: gaussian, binomial, poisson, gamma, tweedie.
  :param link: A character string specifying the link function. The default is the canonical link for the family.
               The supported links for each of the family specifications are:
               "gaussian": "identity", "log", "inverse"
               "binomial": "logit", "log"
               "poisson": "log", "identity"
               "gamma": "inverse", "log", "identity"
               "tweedie": "tweedie"
  :param tweedie_variance_power: A numeric specifying the power for the variance function when family = "tweedie".
  :param tweedie_link_power: A numeric specifying the power for the link function when family = "tweedie".
  :param alpha: A numeric in [0, 1] specifying the elastic-net mixing parameter.
                The elastic-net penalty is defined to be:
                eqn{P(\alpha,\beta) = (1-\alpha)/2||\beta||_2^2 + \alpha||\beta||_1 = \sum_j [(1-\alpha)/2 \beta_j^2 + \alpha|\beta_j|],
                making alpha = 1 the lasso penalty and alpha = 0 the ridge penalty.
  :param Lambda: A non-negative shrinkage parameter for the elastic-net, which multiplies \eqn{P(\alpha,\beta) in the objective function. When lambda = 0, no elastic-net penalty is applied and ordinary generalized linear models are fit. (Capitalized because "lambda" is a Python keyword; it is sent to the backend as "lambda".)
  :param prior: (Optional) A numeric specifying the prior probability of class 1 in the response when family = "binomial". The default prior is the observational frequency of class 1.
  :param lambda_search: A logical value indicating whether to conduct a search over the space of lambda values starting from the lambda max, given lambda is interpreted as lambda min.
  :param nlambdas: The number of lambda values to use when lambda_search = TRUE.
  :param lambda_min_ratio: Smallest value for lambda as a fraction of lambda.max. By default if the number of
                           observations is greater than the number of variables then lambda_min_ratio = 0.0001; if the number of
                           observations is less than the number of variables then lambda_min_ratio = 0.01.
  :param beta_constraints: A data.frame or H2OParsedData object with the columns ["names", "lower_bounds",
                           "upper_bounds", "beta_given"], where each row corresponds to a predictor in the GLM. "names" contains the predictor
                           names, "lower"/"upper_bounds", are the lower and upper bounds of beta, and "beta_given" is some supplied starting
                           values for the coefficients.
  :param offset_column: Specify the offset column.
  :param weights_column: Specify the weights column.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
  :param intercept: Logical, include constant term (intercept) in the model
  :return: A subclass of ModelBase is returned. The specific subclass depends on the machine learning task at hand (if
  it's binomial classification, then an H2OBinomialModel is returned, if it's regression then a H2ORegressionModel is
  returned). The default print-out of the models is shown, but further GLM-specific information can be queried out of
  the object.
  Upon completion of the GLM, the resulting object has coefficients, normalized coefficients, residual/null deviance,
  aic, and a host of model metrics including MSE, AUC (for logistic regression), degrees of freedom, and confusion
  matrices.
  """
  # Capture the caller's arguments via locals(). Unlike the other builders, keys
  # are lower-cased here so the `Lambda` parameter (named to dodge the `lambda`
  # keyword) is transmitted to the backend as "lambda". NOTE: no local variable
  # may be introduced above this line, or it would leak into parms.
  parms = {k.lower():v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="glm"
  return h2o_model_builder.supervised(parms)
def start_glm_job(x,y,validation_x=None,validation_y=None,**kwargs):
  """
  Build a Generalized Linear Model asynchronously.
  Identical to glm(), except that it does not block on the model build: it
  returns an H2OModelFuture object immediately. Retrieve the finished model
  from that future with get_future_model().
  :return: H2OModelFuture
  """
  # Force the non-blocking path, overriding any caller-supplied value.
  opts = dict(kwargs)
  opts["do_future"] = True
  return glm(x, y, validation_x, validation_y, **opts)
def kmeans(x,validation_x=None,k=None,model_id=None,max_iterations=None,standardize=None,init=None,seed=None,
           nfolds=None,fold_column=None,fold_assignment=None,training_frame=None,validation_frame=None,
           user_points=None,ignored_columns=None,score_each_iteration=None,keep_cross_validation_predictions=None,
           ignore_const_cols=None,checkpoint=None):
  """
  Performs k-means clustering on an H2O dataset.
  :param x: (Optional) A vector containing the data columns on which k-means operates.
  :param k: The number of clusters. Must be between 1 and 1e7 inclusive. k may be omitted if the user specifies the
  initial centers in the init parameter. If k is not omitted, in this case, then it should be equal to the number of
  user-specified centers.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param max_iterations: The maximum number of iterations allowed. Must be between 0 and 1e6 inclusive.
  :param standardize: Logical, indicates whether the data should be standardized before running k-means.
  :param init: A character string that selects the initial set of k cluster centers. Possible values are "Random": for
  random initialization, "PlusPlus": for k-means plus initialization, or "Furthest": for initialization at the furthest
  point from each successive center. Additionally, the user may specify the initial centers as a matrix, data.frame,
  H2OFrame, or list of vectors. For matrices, data.frames, and H2OFrames, each row of the respective structure is an
  initial center. For lists of vectors, each vector is an initial center.
  :param seed: (Optional) Random seed used to initialize the cluster centroids.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :return: Returns an object of class H2OClusteringModel.
  """
  # Capture the caller's arguments via locals(): frame arguments are always
  # forwarded; everything else only when explicitly set. No local variable may be
  # introduced above this line. (The comprehension's own `k` does not clobber the
  # `k` parameter: dict comprehensions have their own scope.)
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="kmeans"
  return h2o_model_builder.unsupervised(parms)
def random_forest(x,y,validation_x=None,validation_y=None,training_frame=None,model_id=None,mtries=None,sample_rate=None,
                  build_tree_one_node=None,ntrees=None,max_depth=None,min_rows=None,nbins=None,nbins_cats=None,
                  binomial_double_trees=None,validation_frame=None,balance_classes=None,max_after_balance_size=None,
                  seed=None,offset_column=None,weights_column=None,nfolds=None,fold_column=None,fold_assignment=None,
                  keep_cross_validation_predictions=None,checkpoint=None):
  """
  Build a Big Data Random Forest Model
  Builds a Random Forest Model on an H2OFrame
  :param x: An H2OFrame containing the predictors in the model.
  :param y: An H2OFrame of the response variable in the model.
  :param training_frame: (Optional) An H2OFrame. Only used to retrieve weights, offset, or nfolds columns, if they aren't already provided in x.
  :param model_id: (Optional) The unique id assigned to the resulting model. If none is given, an id will automatically be generated.
  :param mtries: Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p} for classification, and p/3 for regression, where p is the number of predictors.
  :param sample_rate: Sample rate, from 0 to 1.0.
  :param build_tree_one_node: Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
  :param ntrees: A nonnegative integer that determines the number of trees to grow.
  :param max_depth: Maximum depth to grow the tree.
  :param min_rows: Minimum number of rows to assign to terminal nodes.
  :param nbins: For numerical columns (real/int), build a histogram of this many bins, then split at the best point.
  :param nbins_cats: For categorical columns (enum), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting.
  :param binomial_double_trees: For binary classification: Build 2x as many trees (one per class) - can lead to higher accuracy.
  :param validation_frame: An H2OFrame object containing the variables in the model.
  :param balance_classes: logical, indicates whether or not to balance training data class counts via over/under-sampling (for imbalanced data)
  :param max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0)
  :param seed: Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded
  :param offset_column: Specify the offset column.
  :param weights_column: Specify the weights column.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
  :return: A new classifier or regression model.
  """
  # Capture the caller's arguments via locals(): frame/column arguments are
  # always forwarded; all others only when explicitly set. No local variable may
  # be introduced above this line. Note the backend algorithm name is "drf".
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="drf"
  return h2o_model_builder.supervised(parms)
def prcomp(x,validation_x=None,k=None,model_id=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None,
           training_frame=None,validation_frame=None,pca_method=None):
  """
  Principal components analysis of a H2O dataset using the power method
  to calculate the singular value decomposition of the Gram matrix.
  :param k: The number of principal components to be computed. This must be between 1 and min(ncol(training_frame),
  nrow(training_frame)) inclusive.
  :param model_id: (Optional) The unique hex key assigned to the resulting model. Automatically generated if none
  is provided.
  :param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1 and
  1e6 inclusive.
  :param transform: A character string that indicates how the training data should be transformed before running PCA.
  Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE":
  for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE":
  for demeaning and dividing each column by its range (max - min).
  :param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power
  method iteration.
  :param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included
  in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of
  every categorical variable will be dropped. Defaults to FALSE.
  :return: a new dim reduction model
  """
  # Capture the caller's arguments via locals(): frame arguments are always
  # forwarded; everything else only when explicitly set. No local variable may be
  # introduced above this line.
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="pca"
  return h2o_model_builder.unsupervised(parms)
def svd(x,validation_x=None,nv=None,max_iterations=None,transform=None,seed=None,use_all_factor_levels=None,
        training_frame=None, validation_frame=None):
  """
  Singular value decomposition of a H2O dataset using the power method.
  :param nv: The number of right singular vectors to be computed. This must be between 1 and min(ncol(training_frame),
  nrow(training_frame)) inclusive.
  :param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1
  and 1e6 inclusive.
  :param transform: A character string that indicates how the training data should be transformed before running SVD.
  Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for
  dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for
  demeaning and dividing each column by its range (max - min).
  :param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power
  method iteration.
  :param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included in
  each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every
  categorical variable will be dropped. Defaults to TRUE.
  :return: a new dim reduction model
  """
  # Capture the caller's arguments via locals(): frame arguments are always
  # forwarded; everything else only when explicitly set. No local variable may be
  # introduced above this line.
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="svd"
  # SVD is only exposed on the experimental REST endpoint version.
  parms['_rest_version']=99
  return h2o_model_builder.unsupervised(parms)
def naive_bayes(x,y,validation_x=None,validation_y=None,training_frame=None,validation_frame=None,
                laplace=None,threshold=None,eps=None,compute_metrics=None,offset_column=None,weights_column=None,
                balance_classes=None,max_after_balance_size=None, nfolds=None,fold_column=None,fold_assignment=None,
                keep_cross_validation_predictions=None,checkpoint=None):
  """
  The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a
  Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset.
  When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be
  skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability
  calculation during prediction.
  :param laplace: A positive number controlling Laplace smoothing. The default zero disables smoothing.
  :param threshold: The minimum standard deviation to use for observations without enough data. Must be at least 1e-10.
  :param eps: A threshold cutoff to deal with numeric instability, must be positive.
  :param compute_metrics: A logical value indicating whether model metrics should be computed. Set to FALSE to reduce the runtime of the algorithm.
  :param training_frame: Training Frame
  :param validation_frame: Validation Frame
  :param offset_column: Specify the offset column.
  :param weights_column: Specify the weights column.
  :param nfolds: (Optional) Number of folds for cross-validation. If nfolds >= 2, then validation must remain empty.
  :param fold_column: (Optional) Column with cross-validation fold index assignment per observation
  :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified Must be "AUTO", "Random" or "Modulo"
  :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models
  :return: Returns an H2OBinomialModel if the response has two categorical levels, H2OMultinomialModel otherwise.
  """
  # Capture the caller's arguments via locals(): frame/column arguments are
  # always forwarded; all others only when explicitly set. No local variable may
  # be introduced above this line.
  parms = {k:v for k,v in locals().items() if k in ["training_frame", "validation_frame", "validation_x", "validation_y", "offset_column", "weights_column", "fold_column"] or v is not None}
  parms["algo"]="naivebayes"
  return h2o_model_builder.supervised(parms)
def create_frame(id = None, rows = 10000, cols = 10, randomize = True, value = 0, real_range = 100,
                 categorical_fraction = 0.2, factors = 100, integer_fraction = 0.2, integer_range = 100,
                 binary_fraction = 0.1, binary_ones_fraction = 0.02, missing_fraction = 0.01, response_factors = 2,
                 has_response = False, seed=None):
  """
  Data Frame Creation in H2O.
  Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user.
  :param id: A string indicating the destination key. If empty, this will be auto-generated by H2O.
  :param rows: The number of rows of data to generate.
  :param cols: The number of columns of data to generate. Excludes the response column if has_response == True.
  :param randomize: A logical value indicating whether data values should be randomly generated. This must be TRUE if
  either categorical_fraction or integer_fraction is non-zero.
  :param value: If randomize == FALSE, then all real-valued entries will be set to this value.
  :param real_range: The range of randomly generated real values.
  :param categorical_fraction: The fraction of total columns that are categorical.
  :param factors: The number of (unique) factor levels in each categorical column.
  :param integer_fraction: The fraction of total columns that are integer-valued.
  :param integer_range: The range of randomly generated integer values.
  :param binary_fraction: The fraction of total columns that are binary-valued.
  :param binary_ones_fraction: The fraction of values in a binary column that are set to 1.
  :param missing_fraction: The fraction of total entries in the data frame that are set to NA.
  :param response_factors: If has_response == TRUE, then this is the number of factor levels in the response column.
  :param has_response: A logical value indicating whether an additional response column should be pre-pended to the
  final H2O data frame. If set to TRUE, the total number of columns will be cols+1.
  :param seed: A seed used to generate random values when randomize = TRUE.
  :return: the H2OFrame that was created
  """
  # `id` shadows the builtin, but it is part of the public signature and kept.
  dest_key = id if id is not None else _py_tmp_key()
  # The backend uses -1 to mean "pick a seed for me".
  seed_value = seed if seed is not None else -1
  parms = {"dest": dest_key,
           "rows": rows,
           "cols": cols,
           "randomize": randomize,
           "value": value,
           "real_range": real_range,
           "categorical_fraction": categorical_fraction,
           "factors": factors,
           "integer_fraction": integer_fraction,
           "integer_range": integer_range,
           "binary_fraction": binary_fraction,
           "binary_ones_fraction": binary_ones_fraction,
           "missing_fraction": missing_fraction,
           "response_factors": response_factors,
           "has_response": has_response,
           "seed": seed_value,
           }
  job = H2OJob(H2OConnection.post_json("CreateFrame", **parms), "Create Frame")
  job.poll()
  return get_frame(parms["dest"])
def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None):
  """
  Categorical Interaction Feature Creation in H2O.
  Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by
  the user.
  :param data: the H2OFrame that holds the target categorical columns.
  :param factors: Factor columns (either indices or column names).
  :param pairwise: Whether to create pairwise interactions between factors (otherwise create one
  higher-order interaction). Only applicable if there are 3 or more factors.
  :param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all
  factor will be made)
  :param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms
  :param destination_frame: A string indicating the destination key. If empty, this will be auto-generated by H2O.
  :return: H2OFrame
  """
  data._eager()
  # Factors may be given as integer indices or as column names; normalize to names.
  column_names = data.names()
  factor_names = []
  for factor in factors:
    factor_names.append(column_names[factor] if isinstance(factor, int) else factor)
  dest_key = destination_frame if destination_frame is not None else _py_tmp_key()
  parms = {"dest": dest_key,
           "source_frame": data._id,
           "factor_columns": [_quoted(name) for name in factor_names],
           "pairwise": pairwise,
           "max_factors": max_factors,
           "min_occurrence": min_occurrence,
           }
  H2OJob(H2OConnection.post_json("Interaction", **parms), "Interactions").poll()
  return get_frame(parms["dest"])
def network_test():
  """
  Run the cloud-wide network test endpoint and display the resulting table.
  :return: None
  """
  res = H2OConnection.get_json(url_suffix="NetworkTest")
  res["table"].show()
def locate(path):
  """
  Search for a relative path and turn it into an absolute path.
  This is handy when hunting for data files to be passed into h2o and used by import file.
  Note: This function is for unit testing purposes only.
  :param path: Path to search for
  :return: Absolute path if it is found. None otherwise.
  """
  # Walk upward from the current working directory until the relative path
  # resolves, or until we hit the filesystem root.
  directory = os.path.realpath(os.getcwd())
  while True:
    candidate = os.path.join(directory, path)
    if os.path.exists(candidate):
      return candidate
    parent = os.path.dirname(directory)
    if parent == directory:
      # dirname() is a fixed point only at the root: nothing left to search.
      raise ValueError("File not found: " + path)
    directory = parent
def store_size():
  """
  Get the H2O store size (current count of keys).
  :return: number of keys in H2O cloud
  """
  # "(store_size)" is a Rapids s-expression evaluated server-side.
  return rapids("(store_size)")["result"]
def keys_leaked(num_keys):
  """
  Ask H2O if any keys leaked.
  :param num_keys: The number of keys that should be there.
  :return: A boolean True/False if keys leaked. If keys leaked, check H2O logs for further detail.
  """
  # BUG FIX: Rapids expressions are parenthesized s-expressions (compare
  # store_size above, which sends "(store_size)"); the opening "(" was missing,
  # producing the malformed expression "keys_leaked #n)".
  return rapids("(keys_leaked #{})".format(num_keys))["result"] == "TRUE"
def as_list(data, use_pandas=True):
  """
  Convert an H2O data object into a python-specific object.
  WARNING: This will pull all data local!
  If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame.
  Otherwise, a list-of-lists populated by character data will be returned (so the types of data will
  all be str).
  :param data: An H2O data object.
  :param use_pandas: Try to use pandas for reading in the data.
  :return: List of list (Rows x Columns).
  """
  # Thin delegate; all conversion logic lives in H2OFrame.as_data_frame.
  return H2OFrame.as_data_frame(data, use_pandas)
def set_timezone(tz):
  """
  Set the Time Zone on the H2O Cloud
  :param tz: The desired timezone.
  :return: None
  """
  # Fire-and-forget: the Rapids result is intentionally discarded.
  rapids(ExprNode("setTimeZone", tz)._eager())
def get_timezone():
  """
  Get the Time Zone on the H2O Cloud
  :return: the time zone (string)
  """
  # _scalar() collapses the single-cell result frame to a python value.
  return H2OFrame(expr=ExprNode("getTimeZone"))._scalar()
def list_timezones():
  """
  Get a list of all the timezones
  :return: the time zones (as an H2OFrame)
  """
  return H2OFrame(expr=ExprNode("listTimeZones"))._frame()
class H2ODisplay:
  """
  Pretty printing for H2O Objects;
  Handles both IPython and vanilla console display
  """
  # Format string for thousands-separated integers, e.g. "{:,}".format(12345).
  THOUSANDS = "{:,}"
  def __init__(self,table=None,header=None,table_header=None,**kwargs):
    """
    Build and immediately display a table.
    :param table: list of rows (each row an iterable of cells) to render.
    :param table_header: optional title printed above the table.
    :param header: optional column-header row.
    :param kwargs: extra options forwarded to tabulate.tabulate.
    """
    self.table_header=table_header
    self.header=header
    self.table=table
    self.kwargs=kwargs
    # do_print gates __repr__/_repr_html_ so the table is rendered exactly once
    # even though IPython may invoke those hooks again after construction.
    self.do_print=True
    # one-shot display... never return an H2ODisplay object (or try not to)
    # if holding onto a display object, then may have odd printing behavior
    # the __repr__ and _repr_html_ methods will try to save you from many prints,
    # but just be WARNED that your mileage may vary!
    #
    # In other words, it's better to just new one of these when you're ready to print out.
    if self.table_header is not None:
      print
      print self.table_header + ":"
      print
    if H2ODisplay._in_ipy():
      from IPython.display import display
      display(self)
      self.do_print=False
    else:
      self.pprint()
      self.do_print=False
  # for Ipython
  def _repr_html_(self):
    # Returns None once the table has been displayed, suppressing re-rendering.
    if self.do_print:
      return H2ODisplay._html_table(self.table,self.header)
  def pprint(self):
    # Console rendering path: delegate formatting to __repr__, then print it.
    r = self.__repr__()
    print r
  # for python REPL console
  def __repr__(self):
    if self.do_print or not H2ODisplay._in_ipy():
      if self.header is None: return tabulate.tabulate(self.table,**self.kwargs)
      else: return tabulate.tabulate(self.table,headers=self.header,**self.kwargs)
    # Already displayed once; re-arm for the next repr and return nothing.
    self.do_print=True
    return ""
  @staticmethod
  def _in_ipy():  # are we in ipy? then pretty print tables with _repr_html
    try:
      # __IPYTHON__ is injected into builtins only when running under IPython.
      __IPYTHON__
      return True
    except NameError:
      return False
  # some html table builder helper things
  @staticmethod
  def _html_table(rows, header=None):
    # Render rows (and optional header row) as a scrollable HTML table.
    table= "<div style=\"overflow:auto\"><table style=\"width:50%\">{}</table></div>"  # keep table in a div for scroll-a-bility
    table_rows=[]
    if header is not None:
      table_rows.append(H2ODisplay._html_row(header))
    for row in rows:
      table_rows.append(H2ODisplay._html_row(row))
    return table.format("\n".join(table_rows))
  @staticmethod
  def _html_row(row):
    # Render one row: each cell is str()-ified into a <td>. Values are NOT
    # HTML-escaped here.
    res = "<tr>{}</tr>"
    entry = "<td>{}</td>"
    entries = "\n".join([entry.format(str(r)) for r in row])
    return res.format(entries)
def can_use_pandas():
  """
  Check whether the pandas package can be found on this system.
  :return: True if pandas is importable, False otherwise.
  """
  # NOTE(review): `imp` is deprecated in Python 3 (removed in 3.12); this file
  # appears to target Python 2 -- confirm before porting to importlib.
  try:
    imp.find_module('pandas')
    return True
  except ImportError:
    return False
# ALL DEPRECATED METHODS BELOW #
def h2o_deprecated(newfun=None):
  """
  Decorator factory marking an API function as deprecated.
  :param newfun: (Optional) The replacement function; when given, its name is
  included in the deprecation message.
  :return: a decorator that wraps the target function so each call emits a
  DeprecationWarning before delegating to the original.
  """
  def o(fun):
    # Build the warning message once, at decoration time.
    if newfun is not None: m = "{} is deprecated. Use {}.".format(fun.__name__,newfun.__name__)
    else: m = "{} is deprecated.".format(fun.__name__)
    @functools.wraps(fun)
    def i(*args, **kwargs):
      # Two blank lines (Python 2 `print` statements) to set the warning apart.
      print
      print
      warnings.warn(m, category=DeprecationWarning, stacklevel=2)
      return fun(*args, **kwargs)
    return i
  return o
@h2o_deprecated(import_file)
def import_frame(path=None):
  """
  Deprecated for import_file.
  :param path: A path specifying the location of the data to import.
  :return: A new H2OFrame
  """
  # Extra warning in addition to the decorator's, kept for backward compatibility.
  warnings.warn("deprecated: Use import_file", DeprecationWarning)
  return import_file(path)
|
weaver-viii/h2o-3
|
h2o-py/h2o/h2o.py
|
Python
|
apache-2.0
| 76,615
|
[
"Gaussian"
] |
c742975c72b74d54b409f2f03bb6e4c2a8e78d892c3d4da8ad2a00b83bbd45d0
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing interface with SciKit-Learn clustering
Created on Feb 13, 2015
@author: senrs
TODO:
For Clustering:
  1) parallelization: n_jobs parameter to some of the algorithms
"""
#for future compatibility with Python 3-----------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3-------------------------------------------
#External Modules---------------------------------------------------------------
import scipy.cluster as hier
import numpy as np
import abc
import ast
import copy
import platform
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from utils import utils
from utils import mathUtils
from BaseClasses import MessageUser
from EntityFactoryBase import EntityFactory
#Internal Modules End-----------------------------------------------------------
# FIXME: temporarily force to use Agg backend for now, otherwise it will cause segmental fault for test:
# test_dataMiningHierarchical.xml in tests/framework/PostProcessors/DataMiningPostProcessor/Clustering
# For the record, when using dendrogram, we have to force matplotlib.use('Agg')
# In the future, I think all the plots should moved to OutStreamPlots -- wangc
#display = utils.displayAvailable()
#if not display:
# matplotlib.use('Agg')
# Switch matplotlib to the interactive TkAgg backend only when a display is
# actually available and we are not on Windows; on headless nodes (or Windows)
# matplotlib's default backend is left alone so that importing this module
# cannot fail or segfault (see the FIXME note above about the Agg backend).
if utils.displayAvailable() and platform.system() != 'Windows':
  import matplotlib
  matplotlib.use('TkAgg')
import matplotlib.pylab as plt
class unSupervisedLearning(utils.metaclass_insert(abc.ABCMeta), MessageUser):
  """
    This is the general interface to any unSupervisedLearning method.
    Essentially it contains a train and an evaluate method.
    Subclasses must implement __trainLocal__, __evaluateLocal__ and
    __confidenceLocal__.
  """
  returnType = ''  ## this describes the type of information generated; the
                   ## possibilities are 'boolean', 'integer', 'float'
  modelType = ''   ## the broad class of the interpolator

  @staticmethod
  def checkArrayConsistency(arrayIn):
    """
      This method checks the consistency of the in-array
      @ In, arrayIn, a 1D numpy array, the array to validate
      @ Out, (consistent, errorMsg), tuple,
        consistent is a boolean where false means the input array is not a
        1D numpy array.
        errorMsg, string, the error message if the input array is inconsistent.
    """
    if type(arrayIn) != np.ndarray:
      return (False, ' The object is not a numpy array')
    ## The input data matrix kind is different for different clustering
    ## algorithms, e.g.:
    ##   [n_samples, n_features] for MeanShift and KMeans
    ##   [n_samples, n_samples]  for AffinityPropogation and SpectralClustering
    ## In other words, MeanShift and KMeans work with points in a vector space,
    ## whereas AffinityPropagation and SpectralClustering can work with
    ## arbitrary objects, as long as a similarity measure exists for such
    ## objects. The input matrix is supplied to unSupervisedLearning models as
    ## 1-D arrays of size [n_samples] (either n_features of or n_samples of them)
    if len(arrayIn.shape) != 1:
      return (False, ' The array must be 1-d')
    return (True, '')

  def __init__(self, **kwargs):
    """
      constructor for unSupervisedLearning class.
      @ In, kwargs, dict, arguments for the unsupervised learning algorithm
    """
    super().__init__()
    self.printTag = 'unSupervised'
    ## booleanFlag that controls the normalization procedure. If true, the
    ## normalization is performed. Default = True
    ## NOTE: kwargs can never be None for a **kwargs parameter (it is always a
    ## dict), but the defensive branch is kept for backward compatibility.
    if kwargs is not None:
      self.initOptionDict = kwargs
    else:
      self.initOptionDict = {}
    ## Labels are passed, if known a priori (optional); they are used in
    ## quality estimates
    if 'Labels' in self.initOptionDict.keys():
      self.labelFeature = self.initOptionDict['Labels']
      self.initOptionDict.pop('Labels')
    else:
      self.labelFeature = None
    ## 'Features' arrives as a comma-separated string of feature names
    if 'Features' in self.initOptionDict.keys():
      self.features = self.initOptionDict['Features'].split(',')
      self.initOptionDict.pop('Features')
    else:
      self.features = None
    if 'verbosity' in self.initOptionDict:
      self.verbosity = self.initOptionDict['verbosity']
      self.initOptionDict.pop('verbosity')
    else:
      self.verbosity = None
    # average value and sigma are used for normalization of the feature data
    # a dictionary where for each feature a tuple (average value, sigma)
    self.muAndSigmaFeatures = {}
    # these need to be declared in the child classes!!!!
    self.amITrained = False
    ## The normalized training data
    self.normValues = None

  def updateFeatures(self, features):
    """
      Change the Features that this classifier targets. If this ROM is trained already, raises an error.
      @ In, features, list(str), list of new features
      @ Out, None
    """
    self.raiseAWarning('Features for learning engine type "{}" have been reset, so ROM is untrained!'.format(self.printTag))
    self.amITrained = False
    self.features = features

  def train(self, tdict, metric=None):
    """
      Method to perform the training of the unSupervisedLearning algorithm
      NB. The unSupervisedLearning object is committed to convert the dictionary
      that is passed (in), into the local format the interface with the kernels
      requires. So far the base class will do the translation into numpy.
      @ In, tdict, dict, training dictionary
      @ In, metric, object, optional, metric instance used to compute pairwise
        distances (required for HistorySet-style nested dictionaries)
      @ Out, None
    """
    self.metric = metric
    if not isinstance(tdict, dict):
      self.raiseAnError(IOError, ' method "train". The training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))
    featureCount = len(self.features)
    ## realizationCount is only defined for the flat (PointSet-like) layout;
    ## the nested (HistorySet-like) layout is handled in the metric branch below
    if not isinstance(tdict[utils.first(tdict.keys())], dict):
      realizationCount = utils.first(tdict.values()).size
    ############################################################################
    ## Error-handling
    ## Do all of our error handling upfront to make the meat of the code more
    ## readable:
    ## Check if the user requested something that is not available
    unidentifiedFeatures = set(self.features) - set(tdict.keys())
    if len(unidentifiedFeatures) > 0:
      ## Me write English good!
      if len(unidentifiedFeatures) == 1:
        msg = 'The requested feature: %s does not exist in the training set.' % list(unidentifiedFeatures)[0]
      else:
        msg = 'The requested features: %s do not exist in the training set.' % str(list(unidentifiedFeatures))
      self.raiseAnError(IOError, msg)
    ## Check that all of the values have the same length
    if not isinstance(utils.first(tdict.values()), dict):
      for name, val in tdict.items():
        if name in self.features and realizationCount != val.size:
          self.raiseAnError(IOError, ' In training set, the number of realizations are inconsistent among the requested features.')
    ## Check if a label feature is provided by the user and in the training data
    if self.labelFeature in tdict:
      ## BUGFIX: this previously read 'tidct' (a typo), which raised a
      ## NameError whenever ground-truth labels were actually supplied.
      self.labelValues = tdict[self.labelFeature]
      resp = self.checkArrayConsistency(self.labelValues)
      if not resp[0]:
        self.raiseAnError(IOError, 'In training set for ground truth labels ' + self.labelFeature + ':' + resp[1])
    else:
      self.raiseAWarning(' The ground truth labels are not known a priori')
      self.labelValues = None
    ## Not sure when this would ever happen, but check that the data you are
    ## given is a 1D array?
    # for name,val in tdict.items():
    #   if name in self.features:
    #     resp = self.checkArrayConsistency(val)
    #     if not resp[0]:
    #       self.raiseAnError(IOError, ' In training set for feature ' + name + ':' + resp[1])
    ## End Error-handling
    ############################################################################
    if metric is None:
      ## No metric: normalize each feature column to zero mean / unit sigma and
      ## store the normalization factors for later denormalization
      self.normValues = np.zeros(shape = (realizationCount, featureCount))
      for cnt, feat in enumerate(self.features):
        featureValues = tdict[feat]
        (mu, sigma) = mathUtils.normalizationFactors(featureValues)
        ## Store the normalized training data, and the normalization factors for
        ## later use
        self.normValues[:, cnt] = (featureValues - mu) / sigma
        self.muAndSigmaFeatures[feat] = (mu, sigma)
    else:
      # metric != None
      ## The dictionary represents a HistorySet
      if isinstance(utils.first(tdict.values()), dict):
        ## normalize data
        ## But why this way? This should be one of the options, this looks like
        ## a form of shape matching, however what if I don't want similar
        ## shapes, I want similar valued curves in space? sigma and mu should
        ## not be forced to be computed within a curve.
        tdictNorm = {}
        for key in tdict:
          tdictNorm[key] = {}
          for var in tdict[key]:
            (mu, sigma) = mathUtils.normalizationFactors(tdict[key][var])
            tdictNorm[key][var] = (tdict[key][var] - mu) / sigma
        ## Build the symmetric pairwise-distance matrix between histories
        cardinality = len(tdictNorm.keys())
        self.normValues = np.zeros((cardinality, cardinality))
        keys = list(tdictNorm.keys())
        for i in range(cardinality):
          for j in range(i, cardinality):
            # process the input data for the metric, numpy.array is required
            assert(list(tdictNorm[keys[i]].keys()) == list(tdictNorm[keys[j]].keys()))
            numParamsI = len(tdictNorm[keys[i]].keys())
            numStepsI = len(utils.first(tdictNorm[keys[i]].values()))
            numStepsJ = len(utils.first(tdictNorm[keys[j]].values()))
            inputI = np.empty((numParamsI, numStepsI))
            inputJ = np.empty((numParamsI, numStepsJ))
            for ind, params in enumerate(tdictNorm[keys[i]].keys()):
              valueI = tdictNorm[keys[i]][params]
              valueJ = tdictNorm[keys[j]][params]
              inputI[ind] = valueI
              inputJ[ind] = valueJ
            pairedData = ((inputI, None), (inputJ, None))
            # FIXME: Using loops can be very slow for large number of realizations
            self.normValues[i][j] = metric.evaluate(pairedData)
            if i != j:
              ## distance matrix is symmetric; mirror the upper triangle
              self.normValues[j][i] = self.normValues[i][j]
      else:
        ## PointSet
        normValues = np.zeros(shape = (realizationCount, featureCount))
        self.normValues = np.zeros(shape = (realizationCount, realizationCount))
        for cnt, feat in enumerate(self.features):
          featureValues = tdict[feat]
          (mu, sigma) = mathUtils.normalizationFactors(featureValues)
          normValues[:, cnt] = (featureValues - mu) / sigma
        # compute the pairwised distance for given matrix
        self.normValues = metric.evaluatePairwise((normValues, None))
    self.__trainLocal__()
    self.amITrained = True

  ## I'd be willing to bet this never gets called, and if it did it would crash
  ## under specific settings, namely using a history set. - unknown (maybe Dan?)
  ## -> for the record, I call it to get the labels in the ROMCollection.Clusters - talbpaul
  def evaluate(self, edict):
    """
      Method to perform the evaluation of a point or a set of points through
      the previously trained unSupervisedLearning algorithm
      NB. The superVisedLearning object is committed to convert the dictionary
      that is passed (in), into the local format the interface with the kernels
      requires.
      @ In, edict, dict, evaluation dictionary
      @ Out, evaluation, numpy.array, array of evaluated points
    """
    if not self.amITrained:
      ## BUGFIX: the exception type was missing here; raiseAnError expects the
      ## error class as its first argument (as every other call site does).
      self.raiseAnError(RuntimeError, 'ROM must be trained before evaluating!')
    if not isinstance(edict, dict):
      self.raiseAnError(IOError, ' Method "evaluate". The evaluate request/s need/s to be provided through a dictionary. Type of the in-object is ' + str(type(edict)))
    realizationCount = utils.first(edict.values()).size
    featureCount = len(self.features)
    ############################################################################
    ## Error-handling
    ## Do all of our error handling upfront to make the meat of the code more
    ## readable:
    ## Check if the user requested something that is not available
    unidentifiedFeatures = set(self.features) - set(edict.keys())
    if len(unidentifiedFeatures) > 0:
      ## Me write English good!
      if len(unidentifiedFeatures) == 1:
        msg = 'The requested feature: %s does not exist in the evaluate set.' % list(unidentifiedFeatures)[0]
      else:
        msg = 'The requested features: %s do not exist in the evaluate set.' % str(list(unidentifiedFeatures))
      self.raiseAnError(IOError, msg)
    for name, values in edict.items():
      resp = self.checkArrayConsistency(values)
      if not resp[0]:
        self.raiseAnError(IOError, ' In evaluate request for feature ' + name + ':' + resp[1])
    ## End Error-handling
    ############################################################################
    ## I don't think this is necessary?
    # if self.labelFeature in edict.keys():
    #   self.labelValues = edict[self.labelFeature]
    # construct the evaluation matrix, normalizing with the factors stored at
    # training time so evaluation data lives in the same normalized space
    normedValues = np.zeros(shape = (realizationCount, featureCount))
    for cnt, feature in enumerate(self.features):
      featureValues = edict[feature]
      (mu, sigma) = self.muAndSigmaFeatures[feature]
      normedValues[:, cnt] = (featureValues - mu) / sigma
    evaluation = self.__evaluateLocal__(normedValues)
    return evaluation

  def confidence(self):
    """
      This call is used to get an estimate of the confidence in the prediction
      of the clusters. The base class self.confidence checks if the clusters are
      already evaluated (trained) then calls the local confidence
      @ In, None
      @ Out, confidence, float, the confidence
    """
    if self.amITrained:
      return self.__confidenceLocal__()
    else:
      self.raiseAnError(IOError, ' The confidence check is performed before training.')

  def getDataMiningType(self):
    """
      This method is used to return the type of data mining algorithm to be employed
      @ In, none
      @ Out, none
    """
    pass

  @abc.abstractmethod
  def __trainLocal__(self):
    """
      Perform training...
      @ In, none
      @ Out, none
    """

  @abc.abstractmethod
  def __evaluateLocal__(self, featureVals):
    """
      @ In, featureVals, 2-D numpy.array, [n_samples,n_features]
      @ Out, targetVals , 1-D numpy.array, [n_samples]
    """

  @abc.abstractmethod
  def __confidenceLocal__(self):
    """
      This should return an estimation of the quality of the prediction.
      @ In, none
      @ Out, none
    """
#
#
class SciKitLearn(unSupervisedLearning):
  """
    SciKitLearn interface for unsupervised Learning
  """
  modelType = 'SciKitLearn'
  ## Class-level registry of supported estimators, lazily populated on first
  ## instantiation: availImpl[SKLtype][SKLsubType] = (estimator class, return type)
  availImpl = {}

  def __init__(self, **kwargs):
    """
      constructor for SciKitLearn class.
      @ In, kwargs, dict, arguments for the SciKitLearn algorithm
      @ Out, None
    """
    unSupervisedLearning.__init__(self, **kwargs)
    ## sklearn imports are deferred to here so that merely importing this
    ## module does not require sklearn; the registry is filled only once
    ## (availImpl is a class attribute, so mutations here are shared).
    if len(self.availImpl) == 0:
      import sklearn.cluster
      import sklearn.mixture
      import sklearn.manifold
      import sklearn.decomposition
      self.availImpl['cluster'] = {}  # Generalized Cluster
      self.availImpl['cluster']['AffinityPropogation'    ] = (sklearn.cluster.AffinityPropagation    , 'float')  # Perform Affinity Propagation Clustering of data.
      self.availImpl['cluster']['DBSCAN'                 ] = (sklearn.cluster.DBSCAN                 , 'float')  # Perform DBSCAN clustering from vector array or distance matrix.
      self.availImpl['cluster']['KMeans'                 ] = (sklearn.cluster.KMeans                 , 'float')  # K-Means Clustering
      self.availImpl['cluster']['MiniBatchKMeans'        ] = (sklearn.cluster.MiniBatchKMeans        , 'float')  # Mini-Batch K-Means Clustering
      self.availImpl['cluster']['MeanShift'              ] = (sklearn.cluster.MeanShift              , 'float')  # Mean Shift Clustering
      self.availImpl['cluster']['SpectralClustering'     ] = (sklearn.cluster.SpectralClustering     , 'float')  # Apply clustering to a projection to the normalized laplacian.
      self.availImpl['cluster']['Agglomerative'          ] = (sklearn.cluster.AgglomerativeClustering, 'float')  # Agglomerative Clustering - Feature of SciKit-Learn version 0.15
      # self.availImpl['cluster']['FeatureAgglomeration' ] = (cluster.FeatureAgglomeration , 'float')  # - Feature of SciKit-Learn version 0.15
      # self.availImpl['cluster']['Ward'                 ] = (cluster.Ward                 , 'float')  # Ward hierarchical clustering: constructs a tree and cuts it.
      # self.availImpl['bicluster'] = {}
      # self.availImpl['bicluster']['SpectralBiclustering'] = (cluster.bicluster.SpectralBiclustering, 'float')  # Spectral biclustering (Kluger, 2003).
      # self.availImpl['bicluster']['SpectralCoclustering'] = (cluster.bicluster.SpectralCoclustering, 'float')  # Spectral Co-Clustering algorithm (Dhillon, 2001).
      self.availImpl['mixture'] = {}  # Generalized Gaussion Mixture Models (Classification)
      self.availImpl['mixture']['GMM'  ] = (sklearn.mixture.GaussianMixture        , 'float')  # Gaussian Mixture Model
      ## Comment is not even right on it, but the DPGMM is being deprecated by SKL who
      ## admits that it is not working correctly which also explains why it is buried in
      ## their documentation.
      # self.availImpl['mixture']['DPGMM'] = (sklearn.mixture.DPGMM, 'float')  # Variational Inference for the Infinite Gaussian Mixture Model.
      self.availImpl['mixture']['VBGMM'] = (sklearn.mixture.BayesianGaussianMixture, 'float')  # Variational Inference for the Gaussian Mixture Model
      self.availImpl['manifold'] = {}  # Manifold Learning (Embedding techniques)
      self.availImpl['manifold']['LocallyLinearEmbedding'  ] = (sklearn.manifold.LocallyLinearEmbedding, 'float')  # Locally Linear Embedding
      self.availImpl['manifold']['Isomap'                  ] = (sklearn.manifold.Isomap                , 'float')  # Isomap
      self.availImpl['manifold']['MDS'                     ] = (sklearn.manifold.MDS                   , 'float')  # MultiDimensional Scaling
      self.availImpl['manifold']['SpectralEmbedding'       ] = (sklearn.manifold.SpectralEmbedding     , 'float')  # Spectral Embedding for Non-linear Dimensionality Reduction
      # self.availImpl['manifold']['locally_linear_embedding'] = (sklearn.manifold.locally_linear_embedding, 'float')  # Perform a Locally Linear Embedding analysis on the data.
      # self.availImpl['manifold']['spectral_embedding'      ] = (sklearn.manifold.spectral_embedding      , 'float')  # Project the sample on the first eigen vectors of the graph Laplacian.
      self.availImpl['decomposition'] = {}  # Matrix Decomposition
      self.availImpl['decomposition']['PCA'                ] = (sklearn.decomposition.PCA               , 'float')  # Principal component analysis (PCA)
      # self.availImpl['decomposition']['ProbabilisticPCA' ] = (sklearn.decomposition.ProbabilisticPCA  , 'float')  # Additional layer on top of PCA that adds a probabilistic evaluationPrincipal component analysis (PCA)
      ## NOTE(review): RandomizedPCA maps to plain PCA — presumably kept for
      ## backward compatibility after sklearn removed RandomizedPCA; confirm.
      self.availImpl['decomposition']['RandomizedPCA'      ] = (sklearn.decomposition.PCA               , 'float')  # Principal component analysis (PCA) using randomized SVD
      self.availImpl['decomposition']['KernelPCA'          ] = (sklearn.decomposition.KernelPCA         , 'float')  # Kernel Principal component analysis (KPCA)
      self.availImpl['decomposition']['FastICA'            ] = (sklearn.decomposition.FastICA           , 'float')  # FastICA: a fast algorithm for Independent Component Analysis.
      self.availImpl['decomposition']['TruncatedSVD'       ] = (sklearn.decomposition.TruncatedSVD      , 'float')  # Dimensionality reduction using truncated SVD (aka LSA).
      self.availImpl['decomposition']['SparsePCA'          ] = (sklearn.decomposition.SparsePCA         , 'float')  # Sparse Principal Components Analysis (SparsePCA)
      self.availImpl['decomposition']['MiniBatchSparsePCA' ] = (sklearn.decomposition.MiniBatchSparsePCA, 'float')  # Mini-batch Sparse Principal Components Analysis
      # self.availImpl['decomposition']['ProjectedGradientNMF'] = (sklearn.decomposition.ProjectedGradientNMF, 'float')  # Non-Negative matrix factorization by Projected Gradient (NMF)
      # self.availImpl['decomposition']['FactorAnalysis'   ] = (sklearn.decomposition.FactorAnalysis    , 'float')  # Factor Analysis (FA)
      # self.availImpl['decomposition']['NMF'              ] = (sklearn.decomposition.NMF               , 'float')  # Non-Negative matrix factorization by Projected Gradient (NMF)
      # self.availImpl['decomposition']['SparseCoder'      ] = (sklearn.decomposition.SparseCoder       , 'float')  # Sparse coding
      # self.availImpl['decomposition']['DictionaryLearning'] = (sklearn.decomposition.DictionaryLearning, 'float')  # Dictionary Learning
      # self.availImpl['decomposition']['MiniBatchDictionaryLearning'] = (sklearn.decomposition.MiniBatchDictionaryLearning, 'float')  # Mini-batch dictionary learning
      # self.availImpl['decomposition']['fastica'          ] = (sklearn.decomposition.fastica           , 'float')  # Perform Fast Independent Component Analysis.
      # self.availImpl['decomposition']['dict_learning'    ] = (sklearn.decomposition.dict_learning     , 'float')  # Solves a dictionary learning matrix factorization problem.
      # self.availImpl['covariance'] = {}  # Covariance Estimators
      # self.availImpl['covariance']['EmpiricalCovariance'] = (sklearn.covariance.EmpiricalCovariance, 'float')  # Maximum likelihood covariance estimator
      # self.availImpl['covariance']['EllipticEnvelope'   ] = (sklearn.covariance.EllipticEnvelope   , 'float')  # An object for detecting outliers in a Gaussian distributed dataset.
      # self.availImpl['covariance']['GraphLasso'         ] = (sklearn.covariance.GraphLasso         , 'float')  # Sparse inverse covariance estimation with an l1-penalized estimator.
      # self.availImpl['covariance']['GraphLassoCV'       ] = (sklearn.covariance.GraphLassoCV       , 'float')  # Sparse inverse covariance w/ cross-validated choice of the l1 penalty
      # self.availImpl['covariance']['LedoitWolf'         ] = (sklearn.covariance.LedoitWolf         , 'float')  # LedoitWolf Estimator
      # self.availImpl['covariance']['MinCovDet'          ] = (sklearn.covariance.MinCovDet          , 'float')  # Minimum Covariance Determinant (MCD): robust estimator of covariance
      # self.availImpl['covariance']['OAS'                ] = (sklearn.covariance.OAS                , 'float')  # Oracle Approximating Shrinkage Estimator
      # self.availImpl['covariance']['ShrunkCovariance'   ] = (sklearn.covariance.ShrunkCovariance   , 'float')  # Covariance estimator with shrinkage
      # self.availImpl['neuralNetwork'] = {}  # Covariance Estimators
      # self.availImpl['neuralNetwork']['BernoulliRBM'] = (neural_network.BernoulliRBM, 'float')  # Bernoulli Restricted Boltzmann Machine (RBM).
    self.printTag = 'SCIKITLEARN'
    ## The algorithm is selected with the mandatory 'SKLtype' option, encoded
    ## as 'type|subType' (e.g. 'cluster|KMeans').
    if 'SKLtype' not in self.initOptionDict.keys():
      self.raiseAnError(IOError, ' to define a scikit learn unSupervisedLearning Method the SKLtype keyword is needed (from KDD ' + self.name + ')')
    SKLtype, SKLsubType = self.initOptionDict['SKLtype'].split('|')
    self.initOptionDict.pop('SKLtype')
    if not SKLtype in self.__class__.availImpl.keys():
      self.raiseAnError(IOError, ' Unknown SKLtype ' + SKLtype + '(from KDD ' + self.name + ')')
    if not SKLsubType in self.__class__.availImpl[SKLtype].keys():
      self.raiseAnError(IOError, ' Unknown SKLsubType ' + SKLsubType + '(from KDD ' + self.name + ')')
    self.SKLtype = SKLtype
    self.SKLsubType = SKLsubType
    ## NOTE(review): returnType is set on the class, not the instance, so the
    ## last-constructed object wins for all instances — confirm intended.
    self.__class__.returnType = self.__class__.availImpl[SKLtype][SKLsubType][1]
    self.Method = self.__class__.availImpl[SKLtype][SKLsubType][0]()
    paramsDict = self.Method.get_params()
    ## Let's only keep the parameters that the Method understands, throw
    ## everything else away, maybe with a warning message?
    tempDict = {}
    for key, value in self.initOptionDict.items():
      if key in paramsDict:
        ## options arrive as strings; literal_eval turns numbers/lists/bools
        ## back into Python objects, anything unparsable is kept as a string
        try:
          tempDict[key] = ast.literal_eval(value)
        except:
          tempDict[key] = value
      else:
        self.raiseAWarning('Ignoring unknown parameter %s to the method of type %s' % (key, SKLsubType))
    self.initOptionDict = tempDict
    self.Method.set_params(**self.initOptionDict)
    self.normValues = None
    self.outputDict = {}

  def __trainLocal__(self):
    """
      Perform training on samples in self.normValues: array,
      shape = [n_samples, n_features] or [n_samples, n_samples]
      @ In, None
      @ Out, None
    """
    import sklearn.cluster
    import sklearn.neighbors
    ## set bandwidth for MeanShift clustering
    if hasattr(self.Method, 'bandwidth'):
      if 'bandwidth' not in self.initOptionDict.keys():
        ## estimate it from the data when the user did not provide one
        self.initOptionDict['bandwidth'] = sklearn.cluster.estimate_bandwidth(self.normValues,quantile=0.3)
      self.Method.set_params(**self.initOptionDict)
    ## We need this connectivity if we want to use structured ward
    if hasattr(self.Method, 'connectivity'):
      ## we should find a smart way to define the number of neighbors instead of
      ## default constant integer value(10)
      connectivity = sklearn.neighbors.kneighbors_graph(self.normValues, n_neighbors = min(10,len(self.normValues[:,0])-1))
      ## symmetrize the k-NN graph (it is directed as returned by sklearn)
      connectivity = 0.5 * (connectivity + connectivity.T)
      self.initOptionDict['connectivity'] = connectivity
      self.Method.set_params(**self.initOptionDict)
    self.outputDict['outputs'] = {}
    self.outputDict['inputs' ] = self.normValues
    ## This is the stuff that will go into the solution export or just float
    ## around and maybe never be used
    self.metaDict = {}
    ## What are you doing here? Calling half of these methods does nothing
    ## unless you store the data somewhere. If you are going to blindly call
    ## whatever methods that exist in the class, then at least store them for
    ## later. Why is this done again on the PostProcessor side? I am struggling
    ## to understand what this code's purpose is except to obfuscate our
    ## interaction with skl.
    # if hasattr(self.Method, 'fit_predict'):
    #   self.Method.fit_predict(self.normValues)
    # elif hasattr(self.Method, 'predict'):
    #   self.Method.fit(self.normValues)
    #   self.Method.predict(self.normValues)
    # elif hasattr(self.Method, 'fit_transform'):
    #   self.Method.fit_transform(self.normValues)
    # elif hasattr(self.Method, 'transform'):
    #   self.Method.fit(self.normValues)
    #   self.Method.transform(self.normValues)
    self.Method.fit(self.normValues)
    ## I don't care what algorithm you ran, these are the only things I care
    ## about, if I find one of them, then I am going to save it in our defined
    ## variable names
    variableMap = {'labels_': 'labels',
                   'embedding_': 'embeddingVectors',
                   'embedding_vectors_': 'embeddingVectors'}
    ## This will store stuff that should go into the solution export, but
    ## these each need some massaging so we will not handle this automatically.
    # metaMap = {'cluster_centers_': 'clusterCenters',
    #            'means_': 'means',
    #            'covars_': 'covars'}
    ## Not used right now, but maybe someone will want it?
    # otherMap = {'n_clusters': 'noClusters',
    #             'weights_': 'weights',
    #             'cluster_centers_indices_': 'clusterCentersIndices',
    #             'precs_': 'precs',
    #             'noComponents_': 'noComponents',
    #             'reconstructionError': 'reconstruction_error_',
    #             'explained_variance_': 'explainedVariance',
    #             'explained_variance_ratio_': 'explainedVarianceRatio'}
    for key,val in self.Method.__dict__.items():
      if key in variableMap:
        ## Translate the skl name to our naming convention
        self.outputDict['outputs'][variableMap[key]] = copy.deepcopy(val)
      ## The meta information needs special handling otherwise, we could just
      ## do this here and be done in two lines
      # if key in metaMap:
      #   self.metaDict[metaMap[key]] = copy.deepcopy(val)
    ## Below generates the output Dictionary from the trained algorithm, can be
    ## defined in a new method....
    if 'cluster' == self.SKLtype:
      if hasattr(self.Method, 'cluster_centers_') :
        centers = self.Method.cluster_centers_
      elif self.metric is None:
        ## This methods is used by any other clustering algorithm that does
        ## not generatecluster_centers_ to generate the cluster centers as the
        ## average location of all points in the cluster.
        if hasattr(self.Method,'n_clusters'):
          numClusters = self.Method.n_clusters
        else:
          numClusters = len(set(self.Method.labels_))
        centers = np.zeros([numClusters,len(self.features)])
        counter = np.zeros(numClusters)
        ## NOTE(review): negative labels (e.g. DBSCAN noise = -1) would wrap
        ## to the last row here — confirm labels are non-negative upstream.
        for val,index in enumerate(self.Method.labels_):
          centers[index] += self.normValues[val]
          counter[index] += 1
        for index,val in enumerate(centers):
          if counter[index] == 0.:
            self.raiseAnError(RuntimeError, 'The data-mining clustering method '
                              + str(self.Method)
                              + ' has generated a 0-size cluster')
          centers[index] = centers[index] / float(counter[index])
      else:
        centers = None
      if centers is not None:
        ## I hope these arrays are consistently ordered...
        ## We are mixing our internal storage of muAndSigma with SKLs
        ## representation of our data, I believe it is fair to say that we
        ## hand the data to SKL in the same order that we have it stored.
        for cnt, feature in enumerate(self.features):
          (mu,sigma) = self.muAndSigmaFeatures[feature]
          for center in centers:
            center[cnt] = center[cnt] * sigma + mu
        self.metaDict['clusterCenters'] = centers
    elif 'mixture' == self.SKLtype:
      # labels = self.Method.fit_predict(self.normValues)
      ## The fit_predict is not available in all versions of sklearn for GMMs
      ## besides the data should already be fit above
      labels = self.Method.predict(self.normValues)
      self.outputDict['outputs']['labels'] = labels
      if hasattr(self.Method, 'converged_'):
        if not self.Method.converged_:
          self.raiseAnError(RuntimeError, self.SKLtype + '|' + self.SKLsubType
                            + ' did not converged. (from KDD->'
                            + self.SKLsubType + ')')
      ## For both means and covars below:
      ## We are mixing our internal storage of muAndSigma with SKLs
      ## representation of our data, I believe it is fair to say that we
      ## hand the data to SKL in the same order that we have it stored.
      if hasattr(self.Method, 'means_'):
        ## de-normalize the component means back into feature space
        means = copy.deepcopy(self.Method.means_)
        for cnt, feature in enumerate(self.features):
          (mu,sigma) = self.muAndSigmaFeatures[feature]
          for center in means:
            center[cnt] = center[cnt] * sigma + mu
        self.metaDict['means'] = means
      if hasattr(self.Method, 'covariances_') :
        ## de-normalize the covariances (scale by the two features' sigmas)
        covariance = copy.deepcopy(self.Method.covariances_)
        for row, rowFeature in enumerate(self.features):
          rowSigma = self.muAndSigmaFeatures[rowFeature][1]
          for col, colFeature in enumerate(self.features):
            colSigma = self.muAndSigmaFeatures[colFeature][1]
            #if covariance type == full, the shape is (n_components, n_features, n_features)
            if len(covariance.shape) == 3:
              covariance[:,row,col] = covariance[:,row,col] * rowSigma * colSigma
            else:
              #XXX if covariance type == diag, this will be wrong.
              covariance[row,col] = covariance[row,col] * rowSigma * colSigma
        self.metaDict['covars'] = covariance
    elif 'decomposition' == self.SKLtype:
      if 'embeddingVectors' not in self.outputDict['outputs']:
        if hasattr(self.Method, 'transform'):
          embeddingVectors = self.Method.transform(self.normValues)
          self.outputDict['outputs']['embeddingVectors'] = embeddingVectors
        elif hasattr(self.Method, 'fit_transform'):
          embeddingVectors = self.Method.fit_transform(self.normValues)
          self.outputDict['outputs']['embeddingVectors'] = embeddingVectors
        else:
          self.raiseAWarning('The embedding vectors could not be computed.')
      if hasattr(self.Method, 'components_'):
        self.metaDict['components'] = self.Method.components_
      if hasattr(self.Method, 'means_'):
        self.metaDict['means'] = self.Method.means_
      if hasattr(self.Method, 'explained_variance_'):
        self.explainedVariance_ = copy.deepcopy(self.Method.explained_variance_)
        self.metaDict['explainedVariance'] = self.explainedVariance_
      if hasattr(self.Method, 'explained_variance_ratio_'):
        self.metaDict['explainedVarianceRatio'] = self.Method.explained_variance_ratio_

  def __evaluateLocal__(self, featureVals):
    """
      Method to return labels of an already trained unSuperVised algorithm.
      @ In, featureVals, numpy.array, feature values
      @ Out, labels, numpy.array, labels
    """
    ## prefer predict() (uses the already-fit model); fall back to fit_predict
    ## for estimators that can only label the data they are fit on
    if hasattr(self.Method, 'predict'):
      labels = self.Method.predict(featureVals)
    else:
      labels = self.Method.fit_predict(featureVals)
    return labels

  def __confidenceLocal__(self):
    """
      This should return an estimation dictionary of the quality of the
      prediction.
      @ In, None
      @ Out, self.outputdict['confidence'], dict, dictionary of the confidence
        metrics of the algorithms
    """
    import sklearn.metrics
    self.outputDict['confidence'] = {}
    ## I believe you should always have labels populated when dealing with a
    ## clustering algorithm, this second condition may be redundant
    if 'cluster' == self.SKLtype and 'labels' in self.outputDict['outputs']:
      labels = self.outputDict['outputs']['labels']
      ## silhouette is undefined for a single cluster, hence the guard
      if np.unique(labels).size > 1:
        self.outputDict['confidence']['silhouetteCoefficient'] = sklearn.metrics.silhouette_score(self.normValues , labels)
      if hasattr(self.Method, 'inertia_'):
        self.outputDict['confidence']['inertia'] = self.Method.inertia_
      ## If we have ground truth labels, then compute some additional confidence
      ## metrics
      if self.labelValues is not None:
        self.outputDict['confidence']['homogeneity'              ] = sklearn.metrics.homogeneity_score(self.labelValues, labels)
        self.outputDict['confidence']['completenes'              ] = sklearn.metrics.completeness_score(self.labelValues, labels)
        self.outputDict['confidence']['vMeasure'                 ] = sklearn.metrics.v_measure_score(self.labelValues, labels)
        self.outputDict['confidence']['adjustedRandIndex'        ] = sklearn.metrics.adjusted_rand_score(self.labelValues, labels)
        self.outputDict['confidence']['adjustedMutualInformation'] = sklearn.metrics.adjusted_mutual_info_score(self.labelValues, labels)
    elif 'mixture' == self.SKLtype:
      if hasattr(self.Method, 'aic'):
        self.outputDict['confidence']['aic'  ] = self.Method.aic(self.normValues)   ## Akaike Information Criterion
        self.outputDict['confidence']['bic'  ] = self.Method.bic(self.normValues)   ## Bayesian Information Criterion
        self.outputDict['confidence']['score'] = self.Method.score(self.normValues) ## log probabilities of each data point
    return self.outputDict['confidence']

  def getDataMiningType(self):
    """
      This method is used to return the type of data mining algorithm to be employed
      @ In, none
      @ Out, self.SKLtype, string, type of data mining algorithm
    """
    return self.SKLtype
#
#
class temporalSciKitLearn(unSupervisedLearning):
  """
    Data mining library to perform SciKitLearn algorithms along temporal data.
    A separate SciKitLearn engine run is performed at each time step and the
    resulting clusters/components are re-mapped between consecutive steps so
    that their indices stay consistent over time.
  """
  def __init__(self, **kwargs):
    """
      constructor for temporalSciKitLearn class.
      @ In, kwargs, dict, arguments for the SciKitLearn algorithm
      @ Out, None
    """
    unSupervisedLearning.__init__(self, **kwargs)
    self.printTag = 'TEMPORALSCIKITLEARN'
    if 'SKLtype' not in self.initOptionDict.keys():
      self.raiseAnError(IOError, ' to define a scikit learn unSupervisedLearning Method the SKLtype keyword is needed (from KDD ' + self.name + ')')
    self.SKLtype, self.SKLsubType = self.initOptionDict['SKLtype'].split('|')
    self.pivotParameter = self.initOptionDict.get('pivotParameter', 'Time')
    #Pop necessary to keep from confusing SciKitLearn with extra option
    self.reOrderStep = int(self.initOptionDict.pop('reOrderStep', 5))
    # return a SciKitLearn instance as engine for SKL data mining
    self.SKLEngine = factory.returnInstance('SciKitLearn', **self.initOptionDict)
    self.normValues = None
    self.outputDict = {}
  @staticmethod
  def checkArrayConsistency(arrayin, shape):
    """
      This method checks the consistency of the in-array
      @ In, arrayin, object, it should be a numpy array
      @ In, shape, tuple/list, the expected [n_samples, n_time_steps] shape
      @ Out, tuple, tuple[0] is a bool (True -> everything is ok, False -> something wrong), tuple[1], string, the error mesg
    """
    if type(arrayin) != np.ndarray:
      return (False, ' The object is not a numpy array')
    if arrayin.shape[0] != shape[0] or arrayin.shape[1] != shape[1]:
      return (False, ' The object shape is not correct')
    ## The input data matrix kind is different for different clustering methods
    ## e.g. [n_samples, n_features] for MeanShift and KMeans
    ## [n_samples,n_samples] for AffinityPropogation and SpectralClustering
    ## In other words, MeanShift and KMeans work with points in a vector space,
    ## whereas AffinityPropagation and SpectralClustering can work with
    ## arbitrary objects, as long as a similarity measure exists for such
    ## objects
    ## The input matrix supplied to unSupervisedLearning models as 1-D arrays o
    ## size [n_samples], (either n_features of or n_samples of them)
    # if len(arrayin.shape) != 1: return(False, ' The array must be 1-d')
    return (True, '')
  def __deNormalizeData__(self,feat,t,data):
    """
      Method to denormalize data based on the mean and standard deviation stored
      in self.
      @In, feat, string, the feature for which the input is to be denormalized
      @In, t, float, time step identifier
      @In, data, list, input values to be denormalized
      @Out, deNormData, list, output values after denormalization
    """
    N = data.shape[0]
    deNormData = np.zeros(shape=data.shape)
    mu, sig = self.muAndSigmaFeatures[feat][0,t], self.muAndSigmaFeatures[feat][1,t]
    for n in range(N):
      deNormData[n] = data[n]*sig+mu
    return deNormData
  def train(self, tdict):
    """
      Method to train this class.
      @ In, tdict, dictionary, training dictionary
      @ Out, None
    """
    ## need to overwrite train method because time dependent data mining
    ## requires different treatment of input
    if type(tdict) != dict:
      self.raiseAnError(IOError, ' method "train". The training set needs to be provided through a dictionary. Type of the in-object is ' + str(type(tdict)))
    names = list(tdict.keys())
    values = list(tdict.values())
    self.numberOfSample = values[0].shape[0]
    self.numberOfHistoryStep = values[0].shape[1]
    ############################################################################
    ## Error-handling
    ## Do all of our error handling upfront to make the meat of the code more
    ## readable:
    ## Check if the user requested something that is not available
    unidentifiedFeatures = set(self.features) - set(names)
    if len(unidentifiedFeatures) > 0:
      ## Me write English good!
      if len(unidentifiedFeatures) == 1:
        msg = 'The requested feature: %s does not exist in the training set.' % list(unidentifiedFeatures)[0]
      else:
        msg = 'The requested features: %s do not exist in the training set.' % str(list(unidentifiedFeatures))
      self.raiseAnError(IOError, msg)
    ## Check that all of the values have the same length
    ## Check if a label feature is provided by the user and in the training data
    if self.labelFeature in names:
      ## NOTE(fix): this previously read the undefined name 'tidct', so a
      ## NameError was raised whenever ground-truth labels were provided
      self.labelValues = tdict[self.labelFeature]
      resp = self.checkArrayConsistency(self.labelValues,[self.numberOfSample, self.numberOfHistoryStep])
      if not resp[0]:
        self.raiseAnError(IOError, 'In training set for ground truth labels ' + self.labelFeature + ':' + resp[1])
    else:
      self.raiseAWarning(' The ground truth labels are not known a priori')
      self.labelValues = None
    ## End Error-handling
    ############################################################################
    self.normValues = {}
    for cnt,feature in enumerate(self.features):
      resp = self.checkArrayConsistency(tdict[feature], [self.numberOfSample, self.numberOfHistoryStep])
      if not resp[0]:
        self.raiseAnError(IOError, ' In training set for feature ' + feature + ':' + resp[1])
      self.normValues[feature] = np.zeros(shape = tdict[feature].shape)
      self.muAndSigmaFeatures[feature] = np.zeros(shape=(2,self.numberOfHistoryStep))
      for t in range(self.numberOfHistoryStep):
        featureValues = tdict[feature][:,t]
        (mu,sigma) = mathUtils.normalizationFactors(featureValues)
        ## Store the normalized training data, and the normalization factors for
        ## later use
        self.normValues[feature][:,t] = (featureValues - mu) / sigma
        self.muAndSigmaFeatures[feature][0,t] = mu
        self.muAndSigmaFeatures[feature][1,t] = sigma
    self.inputDict = tdict
    self.__trainLocal__()
    self.amITrained = True
  def __trainLocal__(self):
    """
      Method to train this class: runs the SKL engine once per time step and
      collects/re-maps its outputs so cluster/component indices are coherent
      across consecutive steps.
      @ In, None
      @ Out, None
    """
    self.outputDict['outputs'] = {}
    self.outputDict['inputs' ] = self.normValues
    ## This is the stuff that will go into the solution export or just float
    ## around and maybe never be used
    self.metaDict = {}
    for t in range(self.numberOfHistoryStep):
      sklInput = {}
      for feat in self.features:
        sklInput[feat] = self.inputDict[feat][:,t]
      self.SKLEngine.features = sklInput
      self.SKLEngine.train(sklInput)
      self.SKLEngine.confidence()
      ## Store everything from the specific timestep's SKLEngine into a running
      ## list
      for key,val in self.SKLEngine.outputDict['outputs'].items():
        if key not in self.outputDict['outputs']:
          self.outputDict['outputs'][key] = {} # [None]*self.numberOfHistoryStep
        self.outputDict['outputs'][key][t] = val
      for key,val in self.SKLEngine.metaDict.items():
        if key not in self.metaDict:
          self.metaDict[key] = {} # [None]*self.numberOfHistoryStep
        self.metaDict[key][t] = val
      if self.SKLtype in ['cluster']:
        if 'clusterCenters' not in self.metaDict.keys():
          self.metaDict['clusterCenters'] = {}
        if 'clusterCentersIndices' not in self.metaDict.keys():
          self.metaDict['clusterCentersIndices'] = {}
        # # collect labels
        # if hasattr(self.SKLEngine.Method, 'labels_'):
        #   self.outputDict['labels'][t] = self.SKLEngine.Method.labels_
        # # collect cluster centers
        if hasattr(self.SKLEngine.Method, 'cluster_centers_'):
          ## NOTE(review): unlike the mixture branch below, the centers are
          ## copied without __deNormalizeData__ — confirm whether the engine
          ## already returns them in physical units
          self.metaDict['clusterCenters'][t] = np.zeros(shape=self.SKLEngine.metaDict['clusterCenters'].shape)
          for cnt, feat in enumerate(self.features):
            self.metaDict['clusterCenters'][t][:,cnt] = self.SKLEngine.metaDict['clusterCenters'][:,cnt]
        else:
          self.metaDict['clusterCenters'][t] = self.__computeCenter__(sklInput, self.outputDict['outputs']['labels'][t])
        # collect number of clusters
        if hasattr(self.SKLEngine.Method, 'n_clusters'):
          noClusters = self.SKLEngine.Method.n_clusters
        else:
          noClusters = self.metaDict['clusterCenters'][t].shape[0]
        # collect cluster indices
        self.metaDict['clusterCentersIndices'][t] = list(range(noClusters))
        # re-order clusters so indices match the previous time step
        if t > 0:
          remap = self.__reMapCluster__(t, self.metaDict['clusterCenters'], self.metaDict['clusterCentersIndices'])
          for n in range(len(self.metaDict['clusterCentersIndices'][t])):
            self.metaDict['clusterCentersIndices'][t][n] = remap[self.metaDict['clusterCentersIndices'][t][n]]
          for n in range(len(self.outputDict['outputs']['labels'][t])):
            if self.outputDict['outputs']['labels'][t][n] >=0:
              self.outputDict['outputs']['labels'][t][n] = remap[self.SKLEngine.Method.labels_[n]]
          ## TODO: Remap the cluster centers now...
      elif self.SKLtype in ['mixture']:
        if 'means' not in self.metaDict.keys():
          self.metaDict['means'] = {}
        if 'componentMeanIndices' not in self.metaDict.keys():
          self.metaDict['componentMeanIndices'] = {}
        # # collect component membership
        if 'labels' not in self.outputDict['outputs']:
          self.outputDict['outputs']['labels'] = {}
        self.outputDict['outputs']['labels'][t] = self.SKLEngine.evaluate(sklInput)
        # # collect component means
        if hasattr(self.SKLEngine.Method, 'means_'):
          self.metaDict['means'][t] = np.zeros(shape=self.SKLEngine.Method.means_.shape)
          for cnt, feat in enumerate(self.features):
            self.metaDict['means'][t][:,cnt] = self.__deNormalizeData__(feat,t,self.SKLEngine.Method.means_[:,cnt])
        else:
          ## NOTE(fix): previously referenced the undefined name Input['Features']
          ## and the wrong key self.outputDict['labels']; mirror the cluster
          ## branch fallback instead
          self.metaDict['means'][t] = self.__computeCenter__(sklInput, self.outputDict['outputs']['labels'][t])
        # # collect number of components
        if hasattr(self.SKLEngine.Method, 'n_components'):
          numComponents = self.SKLEngine.Method.n_components
        else:
          numComponents = self.metaDict['means'][t].shape[0]
        # # collect component indices
        self.metaDict['componentMeanIndices'][t] = list(range(numComponents))
        # # collect optional output
        if hasattr(self.SKLEngine.Method, 'weights_'):
          if 'weights' not in self.metaDict.keys():
            self.metaDict['weights'] = {}
          self.metaDict['weights'][t] = self.SKLEngine.Method.weights_
        if 'covars' in self.SKLEngine.metaDict:
          if 'covars' not in self.metaDict.keys():
            self.metaDict['covars'] = {}
          self.metaDict['covars'][t] = self.SKLEngine.metaDict['covars']
        if hasattr(self.SKLEngine.Method, 'precs_'):
          if 'precs' not in self.metaDict.keys():
            self.metaDict['precs'] = {}
          self.metaDict['precs'][t] = self.SKLEngine.Method.precs_
        # re-order components so indices match the previous time step
        if t > 0:
          remap = self.__reMapCluster__(t, self.metaDict['means'], self.metaDict['componentMeanIndices'])
          for n in range(len(self.metaDict['componentMeanIndices'][t])):
            self.metaDict['componentMeanIndices'][t][n] = remap[self.metaDict['componentMeanIndices'][t][n]]
          for n in range(len(self.outputDict['outputs']['labels'][t])):
            if self.outputDict['outputs']['labels'][t][n] >=0:
              self.outputDict['outputs']['labels'][t][n] = remap[self.outputDict['outputs']['labels'][t][n]]
      elif 'manifold' == self.SKLtype:
        if 'embeddingVectors' not in self.outputDict['outputs']:
          self.outputDict['outputs']['embeddingVectors'] = {}
        if hasattr(self.SKLEngine.Method, 'embedding_'):
          self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.embedding_
        if 'transform' in dir(self.SKLEngine.Method):
          self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.transform(self.SKLEngine.normValues)
        elif 'fit_transform' in dir(self.SKLEngine.Method):
          self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.fit_transform(self.SKLEngine.normValues)
      elif 'decomposition' == self.SKLtype:
        for var in ['explainedVarianceRatio','means','explainedVariance',
                    'components']:
          if var not in self.metaDict:
            self.metaDict[var] = {}
        if hasattr(self.SKLEngine.Method, 'components_'):
          self.metaDict['components'][t] = self.SKLEngine.Method.components_
        ## This is not the same thing as the components above! This is the
        ## transformed data, the other composes the transformation matrix to get
        ## this.
        ## NOTE(fix): the embeddingVectors sub-dictionary was never created
        ## before indexing it with [t] (KeyError at t=0) and was only populated
        ## for the first step; follow the manifold branch pattern instead
        if 'embeddingVectors' not in self.outputDict['outputs']:
          self.outputDict['outputs']['embeddingVectors'] = {}
        if 'transform' in dir(self.SKLEngine.Method):
          self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.transform(self.SKLEngine.normValues)
        elif 'fit_transform' in dir(self.SKLEngine.Method):
          self.outputDict['outputs']['embeddingVectors'][t] = self.SKLEngine.Method.fit_transform(self.SKLEngine.normValues)
        if hasattr(self.SKLEngine.Method, 'means_'):
          self.metaDict['means'][t] = self.SKLEngine.Method.means_
        if hasattr(self.SKLEngine.Method, 'explained_variance_'):
          self.metaDict['explainedVariance'][t] = self.SKLEngine.Method.explained_variance_
        if hasattr(self.SKLEngine.Method, 'explained_variance_ratio_'):
          self.metaDict['explainedVarianceRatio'][t] = self.SKLEngine.Method.explained_variance_ratio_
      else:
        self.raiseAnError(IOError, 'Unknown type: ' + str(self.SKLtype))
  def __computeCenter__(self, data, labels):
    """
      Method to compute cluster center for clustering algorithms that do not return such information.
      This is needed to re-order cluster number
      @In, data, dict, each value of the dict is a 1-d array of data
      @In, labels, list, list of label for each sample
      @Out, clusterCenter, array, shape = [no_clusters, no_features], center coordinate
    """
    point = {}
    for cnt, l in enumerate(labels):
      if l >= 0 and l not in point.keys():
        point[l] = []
      if l >= 0:
        point[l].append(cnt)
    noCluster = len(point.keys())
    if noCluster == 0:
      self.raiseAnError(ValueError, 'number of cluster is 0!!!')
    clusterCenter = np.zeros(shape=(noCluster,len(self.features)))
    for cnt, feat in enumerate(self.features):
      for ind, l in enumerate(point.keys()):
        clusterCenter[ind,cnt] = np.average(data[feat][point[l]])
    return clusterCenter
  def __computeDist__(self,t,n1,n2,dataCenter,opt):
    """
      Computes the distance between two cluster centers.
      Four different distance metrics are implemented, which can be specified by input opt
      @In, t, float, current time
      @In, n1, integer, center index 1 (at time step t-1)
      @In, n2, integer, center index 2 (at time step t)
      @In, dataCenter, dict, each value contains the center coordinate at each time step
      @In, opt, string, specifies which distance metric to use
      @Out, dist, float, distance between center n1 and center n2
    """
    x1 = dataCenter[t-1][n1,:]
    x2 = dataCenter[t][n2,:]
    if opt in ['Distance']:
      # plain Euclidean distance between the two centers
      dist = np.sqrt(np.dot(x1-x2,x1-x2))
      return dist
    if opt in ['Overlap']:
      # negative count of samples shared by the two clusters (more overlap -> smaller "distance")
      l1 = self.outputDict['outputs']['labels'][t-1]
      l2 = self.SKLEngine.Method.labels_
      point1 = []
      point2 = []
      for n in range(len(l1)):
        if l1[n] == n1:
          point1.append(n)
      for n in range(len(l2)):
        if l2[n] == n2:
          point2.append(n)
      dist = - len(set(point1).intersection(point2))
      return dist
    if opt in ['DistVariance']:
      # Euclidean distance penalized by the difference of within-cluster spreads
      l1 = self.outputDict['outputs']['labels'][t-1]
      l2 = self.SKLEngine.Method.labels_
      dist = np.sqrt(np.dot(x1-x2,x1-x2))
      v1 = v2 = N1 = N2 = 0
      noFeat = len(self.features)
      for n in range(len(l1)):
        # compute variance of points with label l1
        if l1[n] == n1:
          x = np.zeros(shape=(noFeat,))
          for cnt, feat in enumerate(self.features):
            x[cnt] = self.inputDict[feat][n,t-1]
          v1 += np.sqrt(np.dot(x-x1,x-x1))**2
          N1 += 1
      for n in range(len(l2)):
        # compute variance of points with label l2
        if l2[n] == n2:
          x = np.zeros(shape=(noFeat,))
          for cnt, feat in enumerate(self.features):
            x[cnt] = self.inputDict[feat][n,t]
          v2 += np.sqrt(np.dot(x-x2,x-x2))**2
          N2 += 1
      dist += np.abs(np.sqrt(v1/(N1-1)*1.0) - np.sqrt(v2/(N2-1)*1.0))
      return dist
    if opt in ['DistanceWithDecay']:
      # sum of distances to the last K centers, exponentially down-weighted with age
      K = self.reOrderStep
      decR = 1
      dist = 0
      for k in range(1,K+1):
        if t-k >= 0:
          if n1 < dataCenter[t-k].shape[0]:
            x1 = dataCenter[t-k][n1,:]
            dist += np.sqrt(np.dot(x1-x2,x1-x2))*np.exp(-(k-1)*decR)
      return dist
  def __reMapCluster__(self,t,dataCenter,dataCenterIndex):
    """
      Computes the remapping relationship between the current time step cluster and the previous time step
      @In, t, float, current time
      @In, dataCenter, dict, each value contains the center coordinate at each time step
      @In, dataCenterIndex, dict, each value contains the center index at each time step
      @Out, remap, dict, remapping relation between the current time step cluster and the previous time step
    """
    indices1 = dataCenterIndex[t-1]
    indices2 = dataCenterIndex[t]
    N1 = dataCenter[t-1].shape[0]
    N2 = dataCenter[t].shape[0]
    dMatrix = np.zeros(shape=(N1,N2))
    for n1 in range(N1):
      for n2 in range(N2):
        dMatrix[n1,n2] = self.__computeDist__(t,n1,n2,dataCenter,'DistanceWithDecay')
    _, mapping = self.__localReMap__(dMatrix, (list(range(N1)), list(range(N2))))
    remap = {}
    f1, f2 = [False]*N1, [False]*N2
    for mp in mapping:
      i1, i2 = mp[0], mp[1]
      if f1[i1] or f2[i2]:
        self.raiseAnError(ValueError, 'Mapping is overlapped. ')
      remap[indices2[i2]] = indices1[i1]
      f1[i1], f2[i2] = True, True
    if N2 > N1:
      # for the case the new cluster comes up
      tmp = 1
      for n2 in range(N2):
        if indices2[n2] not in remap.keys():
          remap[indices2[n2]] = max(indices1)+tmp
          ## NOTE(fix): increment the offset so that two brand-new clusters
          ## appearing at the same step do not collapse onto the same index
          tmp += 1
          # remap[indices2[n2]] = self.maxNoClusters + 1 # every discontinuity would introduce a new cluster index.
    return remap
  def __localReMap__(self, dMatrix,loc):
    """
      Method to return the mapping based on distance stored in dMatrix, the returned mapping shall minimize the global sum of distance
      This function is recursively called to find the global minimum, so is computationally expensive --- FIXME
      @In, dMatrix, array, shape = (no_clusterAtPreviousTimeStep, no_clusterAtCurrentTimeStep)
      @In, loc, tuple, the first element is the cluster indeces for previous time step and the second one is for the current time step
      @Out, sumDist, float, global sum of distance
      @Out, localReMap, list, remapping relation between the row and column identifier of dMatrix
    """
    if len(loc[0]) == 1:
      # base case: a single previous cluster, pick the closest current one
      sumDist, localReMap = np.inf, -1
      n1 = loc[0][0]
      for n2 in loc[1]:
        if dMatrix[n1,n2] < sumDist:
          sumDist = dMatrix[n1,n2]
          localReMap = n2
      return sumDist, [(n1,localReMap)]
    elif len(loc[1]) == 1:
      # base case: a single current cluster, pick the closest previous one
      sumDist, localReMap = np.inf, -1
      n2 = loc[1][0]
      for n1 in loc[0]:
        if dMatrix[n1,n2] < sumDist:
          sumDist = dMatrix[n1,n2]
          localReMap = n1
      return sumDist, [(localReMap,n2)]
    else:
      # recursive case: fix the first previous cluster, try every pairing and recurse on the rest
      sumDist, i1, i2, localReMap = np.inf, -1, -1, []
      n1 = loc[0][0]
      temp1 = copy.deepcopy(loc[0])
      temp1.remove(n1)
      for n2 in loc[1]:
        temp2 = copy.deepcopy(loc[1])
        temp2.remove(n2)
        d_temp, l = self.__localReMap__(dMatrix, (temp1,temp2))
        if dMatrix[n1,n2] + d_temp < sumDist:
          sumDist = dMatrix[n1,n2] + d_temp
          i1, i2, localReMap = n1, n2, l
      localReMap.append((i1,i2))
      return sumDist, localReMap
  def __evaluateLocal__(self, featureVals):
    """
      Not implemented for this class
    """
    pass
  def __confidenceLocal__(self):
    """
      Not implemented for this class
    """
    pass
  def getDataMiningType(self):
    """
      This method is used to return the type of data mining algorithm to be employed
      @ In, none
      @ Out, self.SKLtype, string, type of data mining algorithm
    """
    return self.SKLtype
class Scipy(unSupervisedLearning):
  """
    Scipy interface for hierarchical Learning
  """
  ## Printable model type and the catalog of supported algorithms:
  ## availImpl maps category -> subtype -> (implementing module, return type tag).
  modelType = 'Scipy'
  availImpl = {}
  availImpl['cluster'] = {}
  availImpl['cluster']['Hierarchical'] = (hier.hierarchy, 'float') # Perform Hierarchical Clustering of data.
  def __init__(self, **kwargs):
    """
      constructor for Scipy class.
      @ In, kwargs, dict, arguments for the Scipy algorithm; must contain a
                    'SCIPYtype' entry of the form 'type|subType' (e.g. 'cluster|Hierarchical')
      @ Out, None
    """
    unSupervisedLearning.__init__(self, **kwargs)
    self.printTag = 'SCIPY'
    if 'SCIPYtype' not in self.initOptionDict.keys():
      self.raiseAnError(IOError, ' to define a Scipy unSupervisedLearning Method the SCIPYtype keyword is needed (from KDD ' + self.name + ')')
    SCIPYtype, SCIPYsubType = self.initOptionDict['SCIPYtype'].split('|')
    # pop so the remaining options can be forwarded to the scipy calls untouched
    self.initOptionDict.pop('SCIPYtype')
    # validate the requested type/subtype against the catalog above
    if not SCIPYtype in self.__class__.availImpl.keys():
      self.raiseAnError(IOError, ' Unknown SCIPYtype ' + SCIPYtype)
    if not SCIPYsubType in self.__class__.availImpl[SCIPYtype].keys():
      self.raiseAnError(IOError, ' Unknown SCIPYsubType ' + SCIPYsubType)
    self.__class__.returnType = self.__class__.availImpl[SCIPYtype][SCIPYsubType][1]
    self.Method = self.__class__.availImpl[SCIPYtype][SCIPYsubType][0]
    self.SCIPYtype = SCIPYtype
    self.SCIPYsubType = SCIPYsubType
    self.normValues = None
    self.outputDict = {}
  def __trainLocal__(self):
    """
      Perform training on samples in self.normValues: array, shape = [n_samples, n_features] or [n_samples, n_samples]
      @ In, None
      @ Out, self.labels_, array, flat cluster labels from scipy's fcluster
    """
    self.outputDict['outputs'] = {}
    self.outputDict['inputs' ] = self.normValues
    if hasattr(self.Method, 'linkage'):
      # build the hierarchical linkage matrix with the user-chosen method/metric
      self.linkage = self.Method.linkage(self.normValues,self.initOptionDict['method'],self.initOptionDict['metric'])
      if 'dendrogram' in self.initOptionDict and self.initOptionDict['dendrogram'] == 'true':
        # NOTE(review): these options come straight from initOptionDict and may
        # still be strings (e.g. 'true'/'false') — confirm scipy's dendrogram
        # accepts them as provided
        self.advDendrogram(self.linkage,
                           p                = float(self.initOptionDict['p']),
                           leaf_rotation    = 90.,
                           leaf_font_size   = 12.,
                           truncate_mode    = self.initOptionDict['truncationMode'],
                           show_leaf_counts = self.initOptionDict['leafCounts'],
                           show_contracted  = self.initOptionDict['showContracted'],
                           annotate_above   = self.initOptionDict['annotatedAbove'],
                           #orientation      = self.initOptionDict['orientation'],
                           max_d            = self.initOptionDict['level'])
      # cut the tree at the requested level to get flat cluster labels
      self.labels_ = hier.hierarchy.fcluster(self.linkage, self.initOptionDict['level'],self.initOptionDict['criterion'])
      self.outputDict['outputs']['labels'] = self.labels_
    return self.labels_
  def advDendrogram(self,*args, **kwargs):
    """
      This methods actually creates the dendrogram plot and saves it to file
      @ In, args, list, positional arguments forwarded to scipy's dendrogram
                  (the linkage matrix first)
      @ In, kwargs, dict, keyword options forwarded to scipy's dendrogram;
                  'max_d' (cut height) and 'annotate_above' (annotation
                  threshold) are consumed here before forwarding
      @ Out, None
    """
    plt.figure()
    max_d = kwargs.pop('max_d', None)
    # reuse the cut height as the color threshold unless the caller set one
    if max_d and 'color_threshold' not in kwargs:
      kwargs['color_threshold'] = max_d
    annotate_above = kwargs.pop('annotate_above', 0)
    ddata = hier.hierarchy.dendrogram(*args, **kwargs)
    if not kwargs.get('no_plot', False):
      plt.title('Hierarchical Clustering Dendrogram')
      plt.xlabel('sample index')
      plt.ylabel('distance')
      # annotate each merge above the threshold with its merge distance
      for i, d, c in zip(ddata['icoord'], ddata['dcoord'], ddata['color_list']):
        x = 0.5 * sum(i[1:3])
        y = d[1]
        if y > annotate_above:
          plt.plot(x, y, 'o', c=c)
          plt.annotate("%.3g" % y, (x, y), xytext=(15, 11),
                       textcoords='offset points',
                       va='top', ha='center')
      # draw a horizontal line marking the cut height, if any
      if max_d:
        plt.axhline(y=max_d, c='0.1')
    if 'dendFileID' in self.initOptionDict:
      title = self.initOptionDict['dendFileID'] + '.pdf'
    else:
      title = 'dendrogram.pdf'
    plt.savefig(title)
    plt.close()
  def __evaluateLocal__(self,*args, **kwargs):
    """
      Evaluation is not implemented for the Scipy hierarchical interface;
      labels are produced during training (see __trainLocal__).
      @ In, args, list, unused
      @ In, kwargs, dict, unused
      @ Out, None
    """
    pass
  def __confidenceLocal__(self):
    """
      Confidence metrics are not implemented for the Scipy interface
      @ In, None
      @ Out, None
    """
    pass
  def getDataMiningType(self):
    """
      This method is used to return the type of data mining algorithm to be employed
      @ In, none
      @ Out, self.SCIPYtype, string, type of data mining algorithm
    """
    return self.SCIPYtype
# Interface factory: maps the user-facing type names to the wrapper classes
# defined in this module so the framework can instantiate them by name.
factory = EntityFactory('unSuperVisedLearning')
factory.registerType('SciKitLearn', SciKitLearn)
factory.registerType('temporalSciKitLearn', temporalSciKitLearn)
factory.registerType('Scipy', Scipy)
|
idaholab/raven
|
framework/unSupervisedLearning.py
|
Python
|
apache-2.0
| 64,055
|
[
"Gaussian"
] |
536f9bd8c10b065adf3a03e192cd8111c8308026047b67994d4f52a34cc67945
|
import torch
import math
import os
import shutil
import pyprob
import pyprob.diagnostics
from pyprob import Model, InferenceEngine
from pyprob.distributions import Uniform, Normal
class GaussianWithUnknownMeanMarsaglia(Model):
    """Gaussian with unknown mean, where the latent mean is drawn from its
    Normal prior via Marsaglia's polar (rejection) method."""
    def __init__(self, prior_mean=1, prior_stddev=math.sqrt(5), likelihood_stddev=math.sqrt(2), replace=True, *args, **kwargs):
        self.prior_mean = prior_mean
        self.prior_stddev = prior_stddev
        self.likelihood_stddev = likelihood_stddev
        # whether pyprob.sample calls use replacement addressing
        self.replace = replace
        super().__init__('Gaussian with unknown mean (Marsaglia)', *args, **kwargs)

    def marsaglia(self, mean, stddev):
        """Draw one Normal(mean, stddev) sample by rejection from the unit disc."""
        unit_interval = Uniform(-1, 1)
        attempts = 0
        while True:
            x = pyprob.sample(unit_interval, replace=self.replace)
            y = pyprob.sample(unit_interval, replace=self.replace)
            radius_sq = x*x + y*y
            attempts += 1
            if float(radius_sq) >= 1:
                # point fell outside the unit disc: record the rejection and retry
                pyprob.tag(x, name='x_rejected')
                pyprob.tag(y, name='y_rejected')
                pyprob.tag(radius_sq, name='s_rejected')
            else:
                pyprob.tag(x, name='x_accepted')
                pyprob.tag(y, name='y_accepted')
                pyprob.tag(radius_sq, name='s_accepted')
                break
        pyprob.tag(attempts, name='iterations')
        # polar transform of the accepted point into a standard normal draw
        return mean + stddev * (x * torch.sqrt(-2 * torch.log(radius_sq) / radius_sq))

    def forward(self):
        """Sample the latent mean from the prior and attach the two observations."""
        mu = self.marsaglia(self.prior_mean, self.prior_stddev)
        likelihood = Normal(mu, self.likelihood_stddev)
        pyprob.tag(mu, name='mu')
        pyprob.observe(likelihood, name='obs0')
        pyprob.observe(likelihood, name='obs1')
        return mu
def produce_results(replace, results_dir):
    """
    Run importance sampling (IS), inference compilation (IC) and random-walk
    Metropolis-Hastings (RMH) inference on the Marsaglia GUM model and write
    all diagnostic artifacts (autocorrelation, Gelman-Rubin, log-prob,
    address histograms, trace graphs) under results_dir.

    :param replace: bool, whether pyprob.sample calls use replacement addressing
    :param results_dir: str, output directory; recreated from scratch each run
    """
    num_traces = 100000
    num_ic_training_traces = 1000000
    # Start from a clean results directory so stale artifacts never mix in.
    if os.path.exists(results_dir):
        shutil.rmtree(results_dir)
    pyprob.util.create_path(results_dir, directory=True)
    address_dict_file_name = os.path.join(results_dir, 'address_dict')
    model = GaussianWithUnknownMeanMarsaglia(address_dict_file_name=address_dict_file_name, replace=replace)
    # Synthesize ground-truth observations from a single model trace.
    ground_truth_trace = next(model._trace_generator(inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS))
    observes = {'obs0': ground_truth_trace.named_variables['obs0'].value, 'obs1': ground_truth_trace.named_variables['obs1'].value}
    # Importance sampling posterior and its unweighted (proposal) counterpart.
    posterior_is = model.posterior(num_traces, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, observe=observes)
    proposal_is = posterior_is.unweighted().rename(posterior_is.name.replace('Posterior', 'Proposal'))
    # Inference compilation: train an LSTM inference network, then use it as proposal.
    model.learn_inference_network(num_ic_training_traces, observe_embeddings={'obs0': {}, 'obs1': {}}, inference_network=pyprob.InferenceNetwork.LSTM)
    posterior_ic = model.posterior(num_traces, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observes)
    proposal_ic = posterior_ic.unweighted().rename(posterior_ic.name.replace('Posterior', 'Proposal'))
    # Two RMH chains: one from scratch, one initialized at the ground-truth
    # trace, so that Gelman-Rubin/log-prob convergence diagnostics can compare them.
    posterior_rmh = model.posterior(num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observes)
    posterior_rmh_autocorrelation_file_name = os.path.join(results_dir, 'posterior_rmh_autocorrelation')
    pyprob.diagnostics.autocorrelation(posterior_rmh, n_most_frequent=50, plot=True, plot_show=False, file_name=posterior_rmh_autocorrelation_file_name)
    posterior_rmh_gt = model.posterior(num_traces, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observes, initial_trace=ground_truth_trace)
    posterior_rmh_gr_file_name = os.path.join(results_dir, 'posterior_rmh_gelman_rubin')
    pyprob.diagnostics.gelman_rubin([posterior_rmh, posterior_rmh_gt], n_most_frequent=None, plot=True, plot_show=False, file_name=posterior_rmh_gr_file_name)
    posterior_rmh_log_prob_file_name = os.path.join(results_dir, 'posterior_rmh_log_prob')
    pyprob.diagnostics.log_prob([posterior_rmh, posterior_rmh_gt], plot=True, plot_show=False, file_name=posterior_rmh_log_prob_file_name)
    # Address histograms (per-address and aggregated over address bases)
    # comparing each proposal/posterior pair against the RMH reference.
    pyprob.util.create_path(os.path.join(results_dir, 'addresses'), directory=True)
    pyprob.util.create_path(os.path.join(results_dir, 'addresses_aggregated'), directory=True)
    pyprob.util.create_path(os.path.join(results_dir, 'graph'), directory=True)
    pyprob.util.create_path(os.path.join(results_dir, 'graph_aggregated'), directory=True)
    posterior_is_rmh_addresses_file_name = os.path.join(results_dir, 'addresses/posterior_is_rmh_addresses')
    pyprob.diagnostics.address_histograms([posterior_rmh, proposal_is, posterior_is], plot=True, plot_show=False, ground_truth_trace=ground_truth_trace, use_address_base=False, file_name=posterior_is_rmh_addresses_file_name)
    posterior_is_rmh_addresses_aggregated_file_name = os.path.join(results_dir, 'addresses_aggregated/posterior_is_rmh_addresses_aggregated')
    pyprob.diagnostics.address_histograms([posterior_rmh, proposal_is, posterior_is], plot=True, plot_show=False, ground_truth_trace=ground_truth_trace, use_address_base=True, file_name=posterior_is_rmh_addresses_aggregated_file_name)
    posterior_ic_rmh_addresses_file_name = os.path.join(results_dir, 'addresses/posterior_ic_rmh_addresses')
    pyprob.diagnostics.address_histograms([posterior_rmh, proposal_ic, posterior_ic], plot=True, plot_show=False, ground_truth_trace=ground_truth_trace, use_address_base=False, file_name=posterior_ic_rmh_addresses_file_name)
    posterior_ic_rmh_addresses_aggregated_file_name = os.path.join(results_dir, 'addresses_aggregated/posterior_ic_rmh_addresses_aggregated')
    pyprob.diagnostics.address_histograms([posterior_rmh, proposal_ic, posterior_ic], plot=True, plot_show=False, ground_truth_trace=ground_truth_trace, use_address_base=True, file_name=posterior_ic_rmh_addresses_aggregated_file_name)
    # Trace-structure graphs for every posterior, raw and aggregated.
    posterior_is_graph_file_name = os.path.join(results_dir, 'graph/posterior_is_graph')
    pyprob.diagnostics.graph(posterior_is, file_name=posterior_is_graph_file_name)
    posterior_is_graph_aggregated_file_name = os.path.join(results_dir, 'graph_aggregated/posterior_is_graph_aggregated')
    pyprob.diagnostics.graph(posterior_is, use_address_base=True, file_name=posterior_is_graph_aggregated_file_name)
    posterior_ic_graph_file_name = os.path.join(results_dir, 'graph/posterior_ic_graph')
    pyprob.diagnostics.graph(posterior_ic, file_name=posterior_ic_graph_file_name)
    posterior_ic_graph_aggregated_file_name = os.path.join(results_dir, 'graph_aggregated/posterior_ic_graph_aggregated')
    pyprob.diagnostics.graph(posterior_ic, use_address_base=True, file_name=posterior_ic_graph_aggregated_file_name)
    posterior_rmh_graph_file_name = os.path.join(results_dir, 'graph/posterior_rmh_graph')
    pyprob.diagnostics.graph(posterior_rmh, file_name=posterior_rmh_graph_file_name)
    posterior_rmh_graph_aggregated_file_name = os.path.join(results_dir, 'graph_aggregated/posterior_rmh_graph_aggregated')
    pyprob.diagnostics.graph(posterior_rmh, use_address_base=True, file_name=posterior_rmh_graph_aggregated_file_name)
    posterior_is.close()
    posterior_ic.close()
    posterior_rmh.close()
    posterior_rmh_gt.close()
if __name__ == '__main__':
    # Fixed seed so both runs below are reproducible.
    pyprob.set_random_seed(1)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    print('Current dir: {}'.format(current_dir))
    # Run the full diagnostic suite twice: with and without sample replacement.
    results_dir = os.path.join(current_dir, 'gum_marsaglia/replace_true')
    produce_results(replace=True, results_dir=results_dir)
    results_dir = os.path.join(current_dir, 'gum_marsaglia/replace_false')
    produce_results(replace=False, results_dir=results_dir)
    print('Done')
|
probprog/pyprob
|
tests/extra/diagnostics/gum_marsaglia.py
|
Python
|
bsd-2-clause
| 7,875
|
[
"Gaussian"
] |
fbc826020ec42535501da21898f7693c5ed9867503f7a09bf23e270370901e67
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..core import Model
from ..models import Gaussian1D
from ... import units as u
from ...units import UnitsError
from ...tests.helper import assert_quantity_allclose
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
    """
    Test evaluation of a single model with Quantity parameters that do
    not explicitly require units.
    """
    # A reference model without units, and a quantity-aware twin that should
    # agree with it once units are attached.
    ref = Gaussian1D(1, 1, 0.1)
    qty = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    # Calling the quantity model with a compatible quantity works as expected
    assert_quantity_allclose(qty(1 * u.m), ref(1) * u.J)
    # A unitless input is rejected because the model demands lengths...
    with pytest.raises(UnitsError) as err:
        qty(1)
    assert err.value.args[0] == ("Units of input 'x', (dimensionless), could not be "
                                 "converted to required input units of m (length)")
    # ...except for zero, which is treated as a special case
    assert_quantity_allclose(qty(0), ref(0) * u.J)
    # Equivalent units are converted transparently
    assert_allclose(qty(0.0005 * u.km).value, ref(0.5))
    # Incompatible units are rejected
    with pytest.raises(UnitsError) as err:
        qty(3 * u.s)
    assert err.value.args[0] == ("Units of input 'x', s (time), could not be "
                                 "converted to required input units of m (length)")
    # The unitless model, conversely, rejects quantity input
    with pytest.raises(UnitsError) as err:
        ref(3 * u.m)
    # TODO: determine what error message should be here
    # assert err.value.args[0] == ("Units of input 'x', m (length), could not be "
    #                              "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
    """
    We now make sure that equivalencies are correctly taken into account
    """
    model = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
    # Without equivalencies a frequency input cannot be converted to length
    with pytest.raises(UnitsError) as err:
        model(30 * u.PHz)
    assert err.value.args[0] == ("Units of input 'x', PHz (frequency), could "
                                 "not be converted to required input units of "
                                 "nm (length)")
    # Passing the spectral equivalency at call time enables the conversion
    assert_quantity_allclose(model(30 * u.PHz, equivalencies={'x': u.spectral()}),
                             model(9.993081933333332 * u.nm))
class MyTestModel(Model):
    # Minimal two-input / one-output model used by the unit-handling tests
    # below; evaluation is simply the product of the two inputs.
    inputs = ('a', 'b')
    outputs = ('f',)

    def evaluate(self, a, b):
        # The prints are debugging aids kept from the original test suite.
        print('a', a)
        print('b', b)
        return a * b
class TestInputUnits():
    """Exercise the input/return unit hooks on a simple two-input model."""

    def setup_method(self, method):
        # Fresh model per test so unit settings do not leak between tests.
        self.model = MyTestModel()

    def test_evaluate(self):
        # We should be able to evaluate with anything
        assert_quantity_allclose(self.model(3, 5), 15)
        assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
        assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)

    def test_input_units(self):
        # Declaring input units constrains what input 'a' may carry.
        self.model.input_units = {'a': u.deg}
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
        assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
        with pytest.raises(UnitsError) as exc:
            self.model(4 * u.s, 3)
        assert exc.value.args[0] == ("Units of input 'a', s (time), could not be "
                                     "converted to required input units of deg (angle)")
        with pytest.raises(UnitsError) as exc:
            self.model(3, 3)
        assert exc.value.args[0] == ("Units of input 'a', (dimensionless), could "
                                     "not be converted to required input units of deg (angle)")

    def test_input_units_allow_dimensionless(self):
        self.model.input_units = {'a': u.deg}
        self.model.input_units_allow_dimensionless = True
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
        with pytest.raises(UnitsError) as exc:
            self.model(4 * u.s, 3)
        assert exc.value.args[0] == ("Units of input 'a', s (time), could not be "
                                     "converted to required input units of deg (angle)")
        # With allow_dimensionless set, plain numbers are accepted for 'a'.
        assert_quantity_allclose(self.model(3, 3), 9)

    def test_input_units_strict(self):
        self.model.input_units = {'a': u.deg}
        self.model.input_units_strict = True
        assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
        # In strict mode the input is converted before evaluation, so the
        # result carries the declared unit (deg), not the supplied one (rad).
        result = self.model(np.pi * u.rad, 2)
        assert_quantity_allclose(result, 360 * u.deg)
        assert result.unit is u.deg

    def test_input_units_equivalencies(self):
        self.model.input_units = {'a': u.micron}
        # Without equivalencies a frequency cannot be converted to a length.
        with pytest.raises(UnitsError) as exc:
            self.model(3 * u.PHz, 3)
        assert exc.value.args[0] == ("Units of input 'a', PHz (frequency), could "
                                     "not be converted to required input units of "
                                     "micron (length)")
        # Declaring the spectral equivalency on the model makes it work.
        self.model.input_units_equivalencies = {'a': u.spectral()}
        assert_quantity_allclose(self.model(3 * u.PHz, 3),
                                 3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))

    def test_return_units(self):
        self.model.input_units = {'a': u.deg}
        self.model.return_units = {'f': u.rad}
        result = self.model(3 * u.deg, 4)
        assert_quantity_allclose(result, 12 * u.deg)
        assert result.unit is u.rad

    def test_return_units_scalar(self):
        # Check that return_units also works when giving a single unit since
        # there is only one output, so is unambiguous.
        self.model.input_units = {'a': u.deg}
        self.model.return_units = u.rad
        result = self.model(3 * u.deg, 4)
        assert_quantity_allclose(result, 12 * u.deg)
        assert result.unit is u.rad
|
funbaker/astropy
|
astropy/modeling/tests/test_quantities_evaluation.py
|
Python
|
bsd-3-clause
| 6,581
|
[
"Gaussian"
] |
828454abf5598806df52f18316004d61bc79ddf216f169b6001a8f9a8c52ff2b
|
'''
Yugang Created at Aug 08, 2016, CHX-NSLS-II
Create a PDF file from XPCS data analysis results, which are generated by CHX data analysis pipeline
How to use:
python Create_Report.py full_file_path uid output_dir (option)
An example of use:
python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66
python Create_Report.py /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/ af8f66 /XF11ID/analysis/2016_2/yuzhang/Results/August/af8f66/test/
'''
def check_dict_keys(dicts, key):
    """Ensure *dicts* contains *key*, inserting the placeholder 'unknown' if absent.

    Mutates *dicts* in place; an existing value is never overwritten.
    Used by report_meta() so missing metadata fields render as 'unknown'
    instead of raising KeyError.
    """
    # Membership test directly on the dict: O(1), no throwaway key list.
    if key not in dicts:
        dicts[key] = 'unknown'
import h5py
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch, cm , mm
from reportlab.lib.colors import pink, green, brown, white, black, red, blue
from reportlab.lib.styles import getSampleStyleSheet
#from reportlab.platypus import Image, Paragraph, Table
from reportlab.lib.pagesizes import letter, A4
from chxanalys.chx_generic_functions import (pload_obj )
from PIL import Image
from time import time
from datetime import datetime
import sys,os
import pandas as pds
import numpy as np
def add_one_line_string(c, s, top, left=30, fontsize=11):
    """Draw the single-line string *s* on ReportLab canvas *c* at (left, top).

    The font size is shrunk so that ``fontsize * len(s)`` never exceeds 1000,
    which keeps very long lines from running off the page.
    """
    estimated_extent = fontsize * len(s)
    if estimated_extent > 1000:
        fontsize = 1000. / len(s)
    c.setFont("Helvetica", fontsize)
    c.drawString(left, top, s)
def add_image_string(c, imgf, data_dir, img_left, img_top, img_height,
                     str1_left, str1_top, str1,
                     str2_left, str2_top, return_=False):
    """Draw the image file *imgf* (from *data_dir*) plus a label and a
    filename caption on ReportLab canvas *c*.

    Parameters
    ----------
    c : reportlab Canvas
    imgf : str
        Image filename, joined to *data_dir* by string concatenation
        (so *data_dir* is expected to end with a path separator).
    img_left, img_top, img_height : numbers
        Placement and height of the image; width is derived from the
        image's aspect ratio.
    str1_* : label text and its position (drawn in blue, 16 pt).
    str2_* : position of the red 'filename: ...' caption.
    return_ : bool
        When True, return the drawn image width (callers use it to lay
        out the next figure).

    If the file does not exist, the label plus a red '-->Not Calculated!'
    marker are drawn instead.
    """
    image = data_dir + imgf
    if os.path.exists(image):
        im = Image.open(image)
        # aspect ratio = height / width of the source image
        ratio = float(im.size[1]) / im.size[0]
        height = img_height
        width = height / ratio
        c.drawImage(image, img_left, img_top, width=width, height=height, mask=None)
        c.setFont("Helvetica", 16)
        c.setFillColor(blue)
        c.drawString(str1_left, str1_top, str1)
        c.setFont("Helvetica", 12)
        c.setFillColor(red)
        c.drawString(str2_left, str2_top, 'filename: %s' % imgf)
        if return_:
            return width
    else:
        c.setFillColor(blue)
        c.drawString(str1_left, str1_top, str1)
        c.setFillColor(red)
        c.drawString(str1_left, str1_top - 40, '-->Not Calculated!')
        if return_:
            # BUG FIX: previously this branch fell through and returned
            # None, so call sites doing arithmetic/comparisons on the
            # returned width (e.g. 'img_width > 400') raised TypeError
            # under Python 3 whenever the image file was missing.
            # Return the height as a square-aspect fallback width.
            return img_height
class create_pdf_report( object ):
'''Aug 16, YG@CHX-NSLS-II
Create a pdf report by giving data_dir, uid, out_dir
data_dir: the input data directory, including all necessary images
the images names should be:
meta_file = 'uid=%s-md'%uid
avg_img_file = 'uid=%s--img-avg-.png'%uid
ROI_on_img_file = 'uid=%s--ROI-on-Image-.png'%uid
qiq_file = 'uid=%s--Circular-Average-.png'%uid
ROI_on_Iq_file = 'uid=%s--ROI-on-Iq-.png'%uid
Iq_t_file = 'uid=%s--Iq-t-.png'%uid
img_sum_t_file = 'uid=%s--img-sum-t.png'%uid
wat_file= 'uid=%s--Waterfall-.png'%uid
Mean_inten_t_file= 'uid=%s--Mean-intensity-of-each-ROI-.png'%uid
g2_file = 'uid=%s--g2-.png'%uid
g2_fit_file = 'uid=%s--g2--fit-.png'%uid
q_rate_file = 'uid=--%s--Q-Rate--fit-.png'%uid
two_time_file = 'uid=%s--Two-time-.png'%uid
two_g2_file = 'uid=%s--g2--two-g2-.png'%uid
uid: the unique id
out_dir: the output directory
report_type:
'saxs': report saxs results
'gisaxs': report gisaxs results
Output:
A PDF file with name as "XPCS Analysis Report for uid=%s"%uid in out_dir folder
'''
def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None,
report_type='saxs',md=None ):
self.data_dir = data_dir
self.uid = uid
self.md = md
#print(md)
if user is None:
user = 'chx'
self.user = user
if out_dir is None:
out_dir = data_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
self.out_dir=out_dir
self.styles = getSampleStyleSheet()
self.width, self.height = letter
self.report_type = report_type
dt =datetime.now()
CurTime = '%02d/%02d/%s/-%02d/%02d/' % ( dt.month, dt.day, dt.year,dt.hour,dt.minute)
self.CurTime = CurTime
if filename is None:
filename="XPCS_Analysis_Report_for_uid=%s.pdf"%uid
filename=out_dir + filename
c = canvas.Canvas( filename, pagesize=letter)
self.filename= filename
#c.setTitle("XPCS Analysis Report for uid=%s"%uid)
c.setTitle(filename)
self.c = c
if load:
self.load_metadata()
def load_metadata(self):
uid=self.uid
data_dir = self.data_dir
#load metadata
meta_file = 'uid=%s_md'%uid
self.metafile = data_dir + meta_file
if self.md is None:
md = pload_obj( data_dir + meta_file )
self.md = md
else:
md = self.md
#print('Get md from giving md')
#print(md)
self.sub_title_num = 0
uid_g2 = None
uid_c12 = None
if 'uid_g2' in list(md.keys()):
uid_g2 = md['uid_g2']
if 'uid_c12' in list(md.keys()):
uid_c12 = md['uid_c12']
'''global definition'''
try:
beg = md['beg']
end= md['end']
uid_ = uid + '_fra_%s_%s'%(beg, end)
except:
uid_ = uid
if beg is None:
uid_ = uid
self.avg_img_file = 'uid=%s_img_avg.png'%uid
self.ROI_on_img_file = 'uid=%s_ROI_on_Image.png'%uid
self.qiq_file = 'uid=%s_q_Iq.png'%uid
self.qiq_fit_file = 'uid=%s_form_factor_fit.png'%uid
#self.qr_1d_file = 'uid=%s_Qr_ROI.png'%uid
if self.report_type =='saxs' or self.report_type =='ang_saxs':
self.ROI_on_Iq_file = 'uid=%s_ROI_on_Iq.png'%uid
elif self.report_type =='gi_saxs':
self.ROI_on_Iq_file = 'uid=%s_Qr_ROI.png'%uid
self.Iq_t_file = 'uid=%s_q_Iqt.png'%uid
self.img_sum_t_file = 'uid=%s_img_sum_t.png'%uid
self.wat_file= 'uid=%s_waterfall.png'%uid
self.Mean_inten_t_file= 'uid=%s_t_ROIs.png'%uid
if uid_g2 is None:
uid_g2 = uid_
self.g2_file = 'uid=%s_g2.png'%uid_g2
self.g2_fit_file = 'uid=%s_g2_fit.png'%uid_g2
#print( self.g2_fit_file )
self.g2_new_page = False
self.g2_fit_new_page = False
jfn = 'uid=%s_g2__joint.png'%uid_g2
if os.path.exists( data_dir + jfn):
self.g2_file = jfn
self.g2_new_page = True
jfn = 'uid=%s_g2_fit__joint.png'%uid_g2
if os.path.exists(data_dir + jfn ):
self.g2_fit_file = jfn
self.g2_fit_new_page = True
self.q_rate_file = 'uid=%s_Q_Rate_fit.png'%uid_g2
#print( self.q_rate_file )
if uid_c12 is None:
uid_c12 = uid_
self.q_rate_two_time_fit_file = 'uid=%s_two_time_Q_Rate_fit.png'%uid_c12
#print( self.q_rate_two_time_fit_file )
self.two_time_file = 'uid=%s_Two_time.png'%uid_c12
self.two_g2_file = 'uid=%s_g2_two_g2.png'%uid_c12
jfn = 'uid=%s_g2_two_g2__joint.png'%uid_c12
self.two_g2_new_page = False
if os.path.exists( data_dir + jfn ):
self.two_g2_file = jfn
self.two_g2_new_page = True
self.four_time_file = 'uid=%s_g4.png'%uid_
self.xsvs_fit_file = 'uid=%s_xsvs_fit.png'%uid_
self.contrast_file = 'uid=%s_contrast.png'%uid_
self.dose_file = 'uid=%s_dose_analysis.png'%uid_
jfn = 'uid=%s_dose_analysis__joint.png'%uid_
self.dose_file_new_page = False
if os.path.exists( data_dir + jfn ):
self.dose_file = jfn
self.dose_file_new_page = True
#print( self.dose_file )
if False:
self.flow_g2v = 'uid=%s_1a_mqv_g2_v_fit.png'%uid_
self.flow_g2p = 'uid=%s_1a_mqp_g2_p_fit.png'%uid_
self.flow_g2v_rate_fit = 'uid=%s_v_fit_rate_Q_Rate_fit.png'%uid_
self.flow_g2p_rate_fit = 'uid=%s_p_fit_rate_Q_Rate_fit.png'%uid_
if True:
self.two_time = 'uid=%s_pv_two_time.png'%uid_
#self.two_time_v = 'uid=%s_pv_two_time.png'%uid_
#self.flow_g2bv = 'uid=%s_g2b_v_fit.png'%uid_
#self.flow_g2bp = 'uid=%s_g2b_p_fit.png'%uid_
self.flow_g2_g2b_p = 'uid=%s_g2_two_g2_p.png'%uid_
self.flow_g2_g2b_v = 'uid=%s_g2_two_g2_v.png'%uid_
self.flow_g2bv_rate_fit = 'uid=%s_vertb_Q_Rate_fit.png'%uid_
self.flow_g2bp_rate_fit = 'uid=%s_parab_Q_Rate_fit.png'%uid_
self.flow_g2v = 'uid=%s_g2_v_fit.png'%uid_
self.flow_g2p = 'uid=%s_g2_p_fit.png'%uid_
self.flow_g2v_rate_fit = 'uid=%s_vert_Q_Rate_fit.png'%uid_
self.flow_g2p_rate_fit = 'uid=%s_para_Q_Rate_fit.png'%uid_
#self.report_header(page=1, top=730, new_page=False)
#self.report_meta(new_page=False)
self.q2Iq_file = 'uid=%s_q2_iq.png'%uid
self.iq_invariant_file = 'uid=%s_iq_invariant.png'%uid
def report_invariant( self, top= 300, new_page=False):
'''create the invariant analysis report
two images:
ROI on average intensity image
ROI on circular average
'''
uid=self.uid
c= self.c
#add sub-title, static images
c.setFillColor(black)
c.setFont("Helvetica", 20)
ds = 230
self.sub_title_num +=1
c.drawString(10, top, "%s. I(q) Invariant Analysis"%self.sub_title_num ) #add title
#add q2Iq
c.setFont("Helvetica", 14)
imgf = self.q2Iq_file
#print( imgf )
label = 'q^2*I(q)'
add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=180,
str1_left=110, str1_top = top-35,str1=label,
str2_left = 60, str2_top = top -320 )
#add iq_invariant
imgf = self.iq_invariant_file
img_height= 180
img_left,img_top =320, top - ds*1.15
str1_left, str1_top,str1= 420, top- 35, 'I(q) Invariant'
str2_left, str2_top = 350, top- 320
#print ( imgf )
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_header(self, page=1, new_page=False):
'''create headers, including title/page number'''
c= self.c
CurTime = self.CurTime
uid=self.uid
user=self.user
c.setFillColor(black)
c.setFont("Helvetica", 14)
#add page number
c.drawString(250, 10, "Page--%s--"%( page ) )
#add time stamp
#c.drawString(350, 10, "Created at %s@CHX-by-%s"%( CurTime,user ) )
s_ = "Created at %s@CHX-By-%s"%( CurTime,user )
add_one_line_string( c, s_, 10, left=350,fontsize = 11 )
#add title
#c.setFont("Helvetica", 22)
title = "XPCS Analysis Report for uid=%s"%uid
c.setFont("Helvetica", 1000/( len(title) ) )
#c.drawString(180,760, "XPCS Report of uid=%s"%uid ) #add title
c.drawString(50,760, "XPCS Analysis Report for uid=%s"%uid ) #add title
#add a line under title
c.setStrokeColor( red )
c.setLineWidth(width=1.5)
c.line( 50, 750, 550, 750 )
if new_page:
c.showPage()
c.save()
def report_meta(self, top=740, new_page=False):
'''create the meta data report,
the meta data include:
uid
Sample:
Measurement
Wavelength
Detector-Sample Distance
Beam Center
Mask file
Data dir
Pipeline notebook
'''
c=self.c
#load metadata
md = self.md
try:
uid = md['uid']
except:
uid=self.uid
#add sub-title, metadata
c.setFont("Helvetica", 20)
ds = 15
self.sub_title_num += 1
c.drawString(10, top, "%s. Metadata"%self.sub_title_num ) #add title
top = top - 5
fontsize = 11
c.setFont("Helvetica", fontsize)
nec_keys = [ 'sample', 'start_time', 'stop_time','Measurement' ,'exposure time' ,'incident_wavelength', 'cam_acquire_t',
'frame_time','detector_distance', 'feedback_x', 'feedback_y', 'shutter mode',
'beam_center_x', 'beam_center_y', 'beam_refl_center_x', 'beam_refl_center_y','mask_file','bad_frame_list', 'transmission', 'roi_mask_file']
for key in nec_keys:
check_dict_keys(md, key)
try:#try exp time from detector
exposuretime= md['count_time'] #exposure time in sec
except:
exposuretime= md['cam_acquire_time'] #exposure time in sec
try:#try acq time from detector
acquisition_period = md['frame_time']
except:
try:
acquisition_period = md['acquire period']
except:
uid = md['uid']
acquisition_period = float( db[uid]['start']['acquire period'] )
s = []
s.append( 'UID: %s'%uid ) ###line 1, for uid
s.append('Sample: %s'%md['sample'] ) ####line 2 sample
s.append('Data Acquisition From: %s To: %s'%(md['start_time'], md['stop_time']))####line 3 Data Acquisition time
s.append( 'Measurement: %s'%md['Measurement'] ) ####line 4 'Measurement
s.append( 'Wavelength: %s A | Num of Image: %d | Exposure time: %s ms | Acquire period: %s ms'%( md['incident_wavelength'], int(md['number of images']),round(float(exposuretime)*1000,4), round(float( acquisition_period )*1000,4) ) ) ####line 5 'lamda...
s.append( 'Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s'%(
md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) ) ####line 6 'Detector-Sample Distance..
if self.report_type == 'saxs':
s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y'])
elif self.report_type == 'gi_saxs':
s7= ('Incident Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y']) +
' || ' +
'Reflect Center: [%s, %s] (pixel)'%(md['beam_refl_center_x'], md['beam_refl_center_y']) )
elif self.report_type == 'ang_saxs' or self.report_type == 'gi_waxs' :
s7= 'Beam Center: [%s, %s] (pixel)'%(md['beam_center_x'], md['beam_center_y'])
else:
s7 = ''
s7 += ' || ' + 'BadLen: %s'%len(md['bad_frame_list'])
s7 += ' || ' + 'Transmission: %s'%md['transmission']
s.append( s7 ) ####line 7 'Beam center...
m = 'Mask file: %s'%md['mask_file'] + ' || ' + 'ROI mask file: %s'%md['roi_mask_file']
#s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename
#s.append( ) ####line 8 mask filename
s.append(m)
s.append( 'Analysis Results Dir: %s'%self.data_dir ) ####line 9 results folder
s.append( 'Metadata Dir: %s.csv-&.pkl'%self.metafile ) ####line 10 metadata folder
try:
s.append( 'Pipeline notebook: %s'%md['NOTEBOOK_FULL_PATH'] ) ####line 11 notebook folder
except:
pass
#print( 'here' )
line =1
for s_ in s:
add_one_line_string( c, s_, top -ds*line , left=30,fontsize = fontsize )
line += 1
if new_page:
c.showPage()
c.save()
def report_static( self, top=560, new_page=False, iq_fit=False):
'''create the static analysis report
two images:
average intensity image
circular average
'''
#add sub-title, static images
c= self.c
c.setFont("Helvetica", 20)
uid=self.uid
ds = 220
self.sub_title_num +=1
c.drawString(10, top, "%s. Static Analysis"%self.sub_title_num ) #add title
#add average image
c.setFont("Helvetica", 14)
imgf = self.avg_img_file
if self.report_type == 'saxs':
ipos = 60
dshift=0
elif self.report_type == 'gi_saxs':
ipos = 200
dshift= 140
elif self.report_type == 'ang_saxs':
ipos = 200
dshift= 140
else:
ipos = 200
dshift= 140
add_image_string( c, imgf, self.data_dir, img_left= ipos, img_top=top-ds, img_height=180,
str1_left=90 + dshift, str1_top = top-35,str1='Average Intensity Image',
str2_left = 80 + dshift, str2_top = top -230 )
#add q_Iq
if self.report_type == 'saxs':
imgf = self.qiq_file
#print(imgf)
if iq_fit:
imgf = self.qiq_fit_file
label = 'Circular Average'
lab_pos = 390
fn_pos = 320
add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180,
str1_left=lab_pos, str1_top = top-35,str1=label,
str2_left = fn_pos, str2_top = top -230 )
else:
if False:
imgf = self.ROI_on_Iq_file #self.qr_1d_file
label = 'Qr-1D'
lab_pos = 420
fn_pos = 350
add_image_string( c, imgf, self.data_dir, img_left=320, img_top=top-ds, img_height=180,
str1_left=lab_pos, str1_top = top-35,str1=label,
str2_left = fn_pos, str2_top = top -230 )
if new_page:
c.showPage()
c.save()
def report_ROI( self, top= 300, new_page=False):
'''create the static analysis report
two images:
ROI on average intensity image
ROI on circular average
'''
uid=self.uid
c= self.c
#add sub-title, static images
c.setFillColor(black)
c.setFont("Helvetica", 20)
ds = 230
self.sub_title_num +=1
c.drawString(10, top, "%s. Define of ROI"%self.sub_title_num ) #add title
#add ROI on image
c.setFont("Helvetica", 14)
imgf = self.ROI_on_img_file
label = 'ROI on Image'
add_image_string( c, imgf, self.data_dir, img_left= 60, img_top=top - ds*1.15, img_height=240,
str1_left=110, str1_top = top-35,str1=label,
str2_left = 60, str2_top = top -260 )
#add q_Iq
if self.report_type == 'saxs' or self.report_type == 'gi_saxs' or self.report_type == 'ang_saxs':
imgf = self.ROI_on_Iq_file
img_height=180
img_left,img_top =320, top - ds
str1_left, str1_top,str1= 420, top- 35, 'ROI on Iq'
str2_left, str2_top = 350, top- 260
#print ( imgf )
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_time_analysis( self, top= 720,new_page=False):
'''create the time dependent analysis report
four images:
each image total intensity as a function of time
iq~t
waterfall
mean intensity of each ROI as a function of time
'''
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
top1=top
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Time Dependent Plot"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top = top1 - 160
#add img_sum_t
if self.report_type == 'saxs':
ipos = 80
elif self.report_type == 'gi_saxs':
ipos = 200
elif self.report_type == 'ang_saxs':
ipos = 200
else:
ipos = 200
imgf = self.img_sum_t_file
img_height=140
img_left,img_top = ipos, top
str1_left, str1_top,str1= ipos + 60, top1 - 20 , 'img sum ~ t'
str2_left, str2_top = ipos, top- 5
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
#plot iq~t
if self.report_type == 'saxs':
imgf = self.Iq_t_file
image = self.data_dir + imgf
img_height=140
img_left,img_top = 350, top
str1_left, str1_top,str1= 420, top1-20 , 'iq ~ t'
str2_left, str2_top = 360, top- 5
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
elif self.report_type == 'gi_saxs':
pass
top = top1 - 340
#add waterfall plot
imgf = self.wat_file
img_height=160
img_left,img_top = 80, top
str1_left, str1_top,str1= 140, top + img_height, 'waterfall plot'
str2_left, str2_top = 80, top- 5
if self.report_type != 'ang_saxs':
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
else:
pass
#add mean-intensity of each roi
imgf = self.Mean_inten_t_file
img_height=160
img_left,img_top = 360, top
str1_left, str1_top,str1= 330, top + img_height, 'Mean-intensity-of-each-ROI'
str2_left, str2_top = 310, top- 5
if self.report_type != 'ang_saxs':
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
else:
pass
if new_page:
c.showPage()
c.save()
def report_one_time( self, top= 350, g2_fit_file=None, q_rate_file=None, new_page=False):
'''create the one time correlation function report
Two images:
One Time Correlation Function with fit
q-rate fit
'''
c= self.c
uid=self.uid
#add sub-title, One Time Correlation Function
c.setFillColor(black)
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
#add g2 plot
if g2_fit_file is None:
imgf = self.g2_fit_file
else:
imgf = g2_fit_file
if self.report_type != 'ang_saxs':
img_height= 300
top = top - 320
str2_left, str2_top = 80, top- 0
else:
img_height= 550
top = top - 600
str2_left, str2_top = 80, top - 400
#add one_time caculation
img_left,img_top = 1, top
if self.g2_fit_new_page or self.g2_new_page:
img_height= 550
top = top - 250
str2_left, str2_top = 80, top - 0
img_left,img_top = 60, top
str1_left, str1_top,str1= 150, top + img_height, 'g2 fit plot'
img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top, return_=True )
#print( imgf,self.data_dir )
#add g2 plot fit
top = top + 70 #
if q_rate_file is None:
imgf = self.q_rate_file
else:
imgf = q_rate_file
if self.report_type != 'ang_saxs':
#print(img_width)
if img_width > 400:
img_height = 90
else:
img_height= 180
img_left,img_top = img_width-10, top #350, top
str2_left, str2_top = img_width + 50, top - 5 #380, top - 5
str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot'
else:
img_height= 300
img_left,img_top = 350, top - 150
str2_left, str2_top = 380, top - 5
str1_left, str1_top,str1= 450, top + 180, 'q-rate fit plot'
if self.g2_fit_new_page or self.g2_new_page:
top = top - 200
img_height= 180
img_left,img_top = 350, top
str2_left, str2_top = 380, top - 5
str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot'
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_mulit_one_time( self, top= 720,new_page=False):
'''create the mulit one time correlation function report
Two images:
One Time Correlation Function with fit
q-rate fit
'''
c= self.c
uid=self.uid
#add sub-title, One Time Correlation Function
c.setFillColor(black)
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. One Time Correlation Function"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
#add g2 plot
top = top - 320
imgf = self.g2_fit_file
image = self.data_dir + imgf
if not os.path.exists(image):
image = self.data_dir + self.g2_file
im = Image.open( image )
ratio = float(im.size[1])/im.size[0]
height= 300
c.drawImage( image, 1, top, width= height/ratio,height=height, mask= 'auto')
#c.drawImage( image, 1, top, width= height/ratio,height=height, mask= None )
c.setFont("Helvetica", 16)
c.setFillColor( blue)
c.drawString( 150, top + height , 'g2 fit plot' )
c.setFont("Helvetica", 12)
c.setFillColor(red)
c.drawString( 80, top- 0, 'filename: %s'%imgf )
#add g2 plot fit
top = top + 70 #
imgf = self.q_rate_file
image = self.data_dir + imgf
if os.path.exists(image):
im = Image.open( image )
ratio = float(im.size[1])/im.size[0]
height= 180
c.drawImage( image, 350, top, width= height/ratio,height=height,mask= 'auto')
c.setFont("Helvetica", 16)
c.setFillColor( blue)
c.drawString( 450, top + 230, 'q-rate fit plot' )
c.setFont("Helvetica", 12)
c.setFillColor(red)
c.drawString( 380, top- 5, 'filename: %s'%imgf )
if new_page:
c.showPage()
c.save()
def report_two_time( self, top= 720, new_page=False):
'''create the one time correlation function report
Two images:
Two Time Correlation Function
two one-time correlatoin function from multi-one-time and from diagonal two-time
'''
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Two Time Correlation Function"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top1=top
top = top1 - 330
#add q_Iq_t
imgf = self.two_time_file
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 180, top + 300, 'two time correlation function'
str2_left, str2_top = 180, top - 10
img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top, return_=True )
top = top - 340
#add q_Iq_t
imgf = self.two_g2_file
if True:#not self.two_g2_new_page:
img_height= 300
img_left,img_top = 100 -70, top
str1_left, str1_top,str1= 210-70, top + 310, 'compared g2'
str2_left, str2_top = 180-70, top - 10
if self.two_g2_new_page:
img_left,img_top = 100, top
img_width = add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top,return_=True )
#print(imgf)
top = top + 50
imgf = self.q_rate_two_time_fit_file
if img_width < 400:
img_height= 140
img_left,img_top = 350, top + 30
str2_left, str2_top = 380 - 80, top - 5
str1_left, str1_top,str1= 450 -80 , top + 230, 'q-rate fit from two-time'
else:
img_height = 90
img_left,img_top = img_width-10, top #350, top
str2_left, str2_top = img_width + 50, top - 5 #380, top - 5
str1_left, str1_top,str1= 450, top + 230, 'q-rate fit plot'
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_four_time( self, top= 720, new_page=False):
'''create the one time correlation function report
Two images:
Two Time Correlation Function
two one-time correlatoin function from multi-one-time and from diagonal two-time
'''
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Four Time Correlation Function"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top1=top
top = top1 - 330
#add q_Iq_t
imgf = self.four_time_file
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 180, top + 300, 'four time correlation function'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_dose( self, top= 720, new_page=False):
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Dose Analysis"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top1=top
top = top1 - 530
#add q_Iq_t
imgf = self.dose_file
img_height= 500
img_left,img_top = 80, top
str1_left, str1_top,str1= 180, top + 500, 'dose analysis'
str2_left, str2_top = 180, top - 10
#print( self.data_dir + self.dose_file)
if os.path.exists( self.data_dir + imgf):
#print( self.dose_file)
im = Image.open( self.data_dir + imgf )
ratio = float(im.size[1])/im.size[0]
width = img_height/ratio
#print(width)
if width >450:
img_height = 450*ratio
if self.dose_file_new_page:
#img_left,img_top = 180, top
img_left,img_top = 100, top
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_flow_pv_g2( self, top= 720, new_page=False):
'''create the one time correlation function report
Two images:
Two Time Correlation Function
two one-time correlatoin function from multi-one-time and from diagonal two-time
'''
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Flow One Time Analysis"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top1=top
top = top1 - 330
#add xsvs fit
imgf = self.flow_g2v
image = self.data_dir + imgf
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
imgf = self.flow_g2v_rate_fit
img_height= 200
img_left,img_top = 350, top +50
str1_left, str1_top,str1= 210, top + 300, ''
str2_left, str2_top = 350, top - 10 + 50
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
top = top - 340
#add contrast fit
imgf = self.flow_g2p
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
imgf = self.flow_g2p_rate_fit
img_height= 200
img_left,img_top = 350, top +50
str1_left, str1_top,str1= 210, top + 300, ''
str2_left, str2_top = 350, top - 10 + 50
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_flow_pv_two_time( self, top= 720, new_page=False):
'''create the two time correlation function report
Two images:
Two Time Correlation Function
two one-time correlatoin function from multi-one-time and from diagonal two-time
'''
c= self.c
uid=self.uid
#add sub-title, Time-dependent plot
c.setFont("Helvetica", 20)
ds = 20
self.sub_title_num +=1
c.drawString(10, top, "%s. Flow One &Two Time Comparison"%self.sub_title_num ) #add title
c.setFont("Helvetica", 14)
top1=top
top = top1 - 330
#add xsvs fit
if False:
imgf = self.two_time
image = self.data_dir + imgf
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 210, top + 300, 'Two_time'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
imgf = self.flow_g2_g2b_p
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 210, top + 300, 'XPCS Vertical Flow by two-time'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
imgf = self.flow_g2bp_rate_fit
img_height= 200
img_left,img_top = 350, top +50
str1_left, str1_top,str1= 210, top + 300, ''
str2_left, str2_top = 350, top - 10 + 50
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
top = top - 340
#add contrast fit
imgf = self.flow_g2_g2b_v
img_height= 300
img_left,img_top = 80, top
str1_left, str1_top,str1= 210, top + 300, 'XPCS Parallel Flow by two-time'
str2_left, str2_top = 180, top - 10
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
imgf = self.flow_g2bv_rate_fit
img_height= 200
img_left,img_top = 350, top +50
str1_left, str1_top,str1= 210, top + 300, ''
str2_left, str2_top = 350, top - 10 + 50
add_image_string( c, imgf, self.data_dir, img_left, img_top, img_height,
str1_left, str1_top,str1,
str2_left, str2_top )
if new_page:
c.showPage()
c.save()
def report_xsvs(self, top=720, new_page=False):
    '''Add the visibility (XSVS) analysis section to the pdf report.

    Draws a numbered section title, then two stacked figures:
      * the XSVS negative-binomial fit, from self.xsvs_fit_file
      * the contrast comparison (xsvs vs xpcs), from self.contrast_file

    Parameters
    ----------
    top : int
        y-coordinate (reportlab canvas units) of the section title.
    new_page : bool
        If True, finish the current page and save the canvas afterwards.
    '''
    c = self.c
    # section title
    c.setFont("Helvetica", 20)
    self.sub_title_num += 1
    c.drawString(10, top, "%s. Visibility Analysis" % self.sub_title_num)
    c.setFont("Helvetica", 14)
    # XSVS fit figure
    top = top - 330
    imgf = self.xsvs_fit_file
    add_image_string(c, imgf, self.data_dir, img_left=100, img_top=top, img_height=300,
                     str1_left=210, str1_top=top + 300, str1='XSVS_Fit_by_Negtive_Binomal Function',
                     str2_left=180, str2_top=top - 10)
    # contrast figure
    top = top - 340
    imgf = self.contrast_file
    add_image_string(c, imgf, self.data_dir, img_left=100, img_top=top, img_height=300,
                     str1_left=210, str1_top=top + 310, str1='contrast get from xsvs and xpcs',
                     str2_left=180, str2_top=top - 10)
    # (removed: a dead `if False:` block that duplicated the two
    # add_image_string calls with manual PIL/reportlab drawing, plus the
    # unused locals `uid` and `ds`)
    if new_page:
        c.showPage()
        c.save()
def new_page(self):
    """Finish the current pdf page; subsequent drawing starts on a fresh page."""
    self.c.showPage()
def save_page(self):
    """Flush all pending pages and write the pdf document to disk."""
    self.c.save()
def done(self):
    """Print a closing banner showing the filename of the generated pdf."""
    banner = '*' * 40
    print()
    print(banner)
    print('The pdf report is created with filename as: %s' % (self.filename))
    print(banner)
def create_multi_pdf_reports_for_uids(uids, g2, data_dir, report_type='saxs', append_name=''):
    ''' Aug 16, YG@CHX-NSLS-II
    Create one pdf report per uid.

    Parameters
    ----------
    uids : nested mapping, uids[run_num][sub_num] gives the uid string
    g2 : dict, {run_num: {sub_num: g2_of_each_uid}}; only its (nested) keys
        are used here, to enumerate which uids to report
    data_dir : str, root folder; each uid's data lives in data_dir/<uid>/
    report_type : str, forwarded to create_pdf_report
    append_name : str, optional suffix appended to the uid in the pdf filename

    Saves one pdf report per uid in data_dir.
    '''
    for key in list(g2.keys()):
        for sub_key in list(g2[key].keys()):
            uid_i = uids[key][sub_key]
            data_dir_ = os.path.join(data_dir, '%s/' % uid_i)
            # optional filename suffix (e.g. a run label)
            uid_name = uid_i + append_name if append_name != '' else uid_i
            c = create_pdf_report(data_dir_, uid_i, data_dir,
                                  report_type=report_type,
                                  filename="XPCS_Analysis_Report_for_uid=%s.pdf" % uid_name)
            # Page one: Meta-data/Iq-Q/ROI
            c.report_header(page=1)
            c.report_meta(top=730)
            if report_type == 'flow':
                c.report_flow_pv_g2(top=720)
            c.save_page()
            c.done()
def create_one_pdf_reports_for_uids(uids, g2, data_dir, filename='all_in_one', report_type='saxs'):
    ''' Aug 16, YG@CHX-NSLS-II
    Create a single pdf report containing one page-set per uid.

    Parameters
    ----------
    uids : nested mapping, uids[run_num][sub_num] gives the uid string
    g2 : dict, {run_num: {sub_num: g2_of_each_uid}}; only its keys are used
    data_dir : str, root folder; each uid's data lives in data_dir/<uid>/
    filename : str, uid/name used for the combined pdf
    report_type : str, forwarded to create_pdf_report

    Saves the combined pdf report in data_dir.
    '''
    c = create_pdf_report(data_dir, uid=filename, out_dir=data_dir, load=False, report_type=report_type)
    page = 1
    for key in list(g2.keys()):
        for sub_key in list(g2[key].keys()):
            uid_i = uids[key][sub_key]
            # point the report object at this uid's data, then reload metadata
            c.uid = uid_i
            c.data_dir = os.path.join(data_dir, '%s/' % uid_i)
            c.load_metadata()
            # one page per uid: Meta-data/Iq-Q/ROI + one-time analysis
            c.report_header(page=page)
            c.report_meta(top=730)
            c.report_one_time(top=500)
            c.new_page()
            page += 1
    c.uid = filename
    c.save_page()
    c.done()
def save_res_h5(full_uid, data_dir, save_two_time=False):
    '''
    YG. Nov 10, 2016
    Save XPCS analysis results to <data_dir>/<full_uid>.h5.

    Writes metadata (as attributes of a placeholder "meta_data" dataset),
    the avg_img/mask/roi arrays, the multi-tau one-time correlation
    (g2/taus) and the two-time-derived one-time correlation (g2b/taus2).
    If save_two_time is True, also writes the two-time correlation g12b.

    NOTE(review): this function reads module-level globals
    (md, g2, taus, g2b, taus2, g12b) rather than taking them as
    parameters -- confirm they exist in the calling namespace.
    '''
    with h5py.File(data_dir + '%s.h5' % full_uid, 'w') as hf:
        # metadata is stored as attributes on a 1-element placeholder dataset
        meta_data = hf.create_dataset("meta_data", (1,), dtype='i')
        for key in md.keys():
            try:
                meta_data.attrs[key] = md[key]
            except Exception:
                # some values cannot be represented as hdf5 attributes
                pass
        hf.create_dataset("avg_img", data=md['avg_img'])
        hf.create_dataset("mask", data=md['mask'])
        hf.create_dataset("roi", data=md['ring_mask'])
        hf.create_dataset("g2", data=g2)
        hf.create_dataset("taus", data=taus)
        if save_two_time:
            # only the (large) two-time matrix is optional; g2b/taus2 are
            # always written, matching the docstring and load_res_h5
            hf.create_dataset("g12b", data=g12b)
        hf.create_dataset("g2b", data=g2b)
        hf.create_dataset("taus2", data=taus2)
def load_res_h5(full_uid, data_dir):
    '''YG. Nov 10, 2016
    Load results from <data_dir>/<full_uid>.h5 (written by save_res_h5).

    Returns
    -------
    meta_data, avg_img, mask, roi, g2, taus, g2b, taus2 and, when the file
    contains a "g12b" dataset, the two-time correlation g12b as a 9th item.
    '''
    with h5py.File(data_dir + '%s.h5' % full_uid, 'r') as hf:
        meta_data_h5 = hf.get("meta_data")
        meta_data = {}
        for att in meta_data_h5.attrs:
            meta_data[att] = meta_data_h5.attrs[att]
        avg_h5 = np.array(hf.get("avg_img"))
        mask_h5 = np.array(hf.get("mask"))
        roi_h5 = np.array(hf.get("roi"))
        g2_h5 = np.array(hf.get("g2"))
        taus_h5 = np.array(hf.get("taus"))
        g2b_h5 = np.array(hf.get("g2b"))
        taus2_h5 = np.array(hf.get("taus2"))
        if 'g12b' in hf:
            # fix: the original returned the undefined name `g12b`
            # (NameError) instead of the loaded array g12b_h5
            g12b_h5 = np.array(hf.get("g12b"))
            return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b_h5
        return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5
def make_pdf_report( data_dir, uid, pdf_out_dir, pdf_filename, username,
        run_fit_form, run_one_time, run_two_time, run_four_time, run_xsvs,
        run_dose=None, report_type='saxs', md=None, report_invariant=False,
        return_class=False ):
    """Assemble the complete multi-page XPCS analysis pdf for one uid.

    Page 1 always holds metadata, the static I(q) (optionally with form
    fit) and the ROI image.  For non-'ang_saxs' report types, subsequent
    pages hold the time analysis plus one section per enabled run_* flag
    (one-time, two-time, four-time, xsvs, dose, invariant); for
    'ang_saxs' the flow g2 / flow two-time reports are produced instead.

    run_* : booleans (run_dose may be None) enabling the matching section.
    md : optional pre-loaded metadata dict forwarded to create_pdf_report.
    Returns the report object when return_class is True, else None.

    NOTE(review): the source of this block arrived with flattened
    indentation; the nesting below pairs the final `else` with the outer
    `report_type != 'ang_saxs'` check, which makes the inner
    `report_type` test in the run_one_time branch redundant -- confirm
    against the upstream file.
    """
    # accept uids given as 'uid=...' / 'Uid=...'
    if uid.startswith("uid=") or uid.startswith("Uid="):
        uid = uid[4:]
    c = create_pdf_report( data_dir, uid, pdf_out_dir, filename= pdf_filename, user= username, report_type=report_type, md = md )
    # Page one: Meta-data/Iq-Q/ROI
    c.report_header(page=1)
    c.report_meta( top=730)
    c.report_static( top=540, iq_fit = run_fit_form )
    c.report_ROI( top= 290)
    # Page Two: img~t/iq~t/waterfall/mean~t/g2/rate~q
    c.new_page()
    c.report_header(page=2)
    page = 2
    if c.report_type != 'ang_saxs':
        c.report_time_analysis( top= 720)
        if run_one_time:
            # one-time section shares page 2 unless a dedicated page is needed
            if c.report_type != 'ang_saxs':
                top = 350
            else:
                top = 500
            if c.g2_fit_new_page:
                c.new_page()
                page += 1
                top = 720
            c.report_one_time( top= top )
        # Page Three onward: two-time / four-time / xsvs / dose / invariant,
        # each on its own page
        if run_two_time:
            c.new_page()
            page += 1
            c.report_header(page= page)
            c.report_two_time( top= 720 )
        if run_four_time:
            c.new_page()
            page += 1
            c.report_header(page= page)
            c.report_four_time( top= 720 )
        if run_xsvs:
            c.new_page()
            page += 1
            c.report_header(page= page)
            c.report_xsvs( top= 720 )
        if run_dose:
            c.new_page()
            page += 1
            c.report_header(page= page)
            c.report_dose( top = 702)
        if report_invariant:
            c.new_page()
            page += 1
            c.report_header(page= page)
            c.report_invariant( top = 702)
    else:
        # angular saxs: flow-specific reports replace the sections above
        c.report_flow_pv_g2( top= 720, new_page= True)
        c.report_flow_pv_two_time( top= 720, new_page= True )
    c.save_page()
    c.done()
    if return_class:
        return c
######################################
###Deal with saving dict to hdf5 file
def save_dict_to_hdf5(dic, filename):
    """Write a (possibly nested) dict to `filename` as an hdf5 file."""
    h5file = h5py.File(filename, 'w')
    try:
        recursively_save_dict_contents_to_group(h5file, '/', dic)
    finally:
        h5file.close()
def load_dict_from_hdf5(filename):
    """Read an hdf5 file written by save_dict_to_hdf5 back into a nested dict."""
    h5file = h5py.File(filename, 'r')
    try:
        return recursively_load_dict_contents_from_group(h5file, '/')
    finally:
        h5file.close()
def recursively_save_dict_contents_to_group(h5file, path, dic):
    """Recursively write `dic` under group `path` of an open h5py File.

    Scalars/strings become datasets; numpy arrays are stored directly
    (falling back to a fixed-width byte-string cast when direct storage
    fails, e.g. for object arrays); nested dicts become sub-groups.
    List values are converted to numpy arrays first.

    Raises ValueError for bad arguments, for unsupported value types, and
    when a read-back of the stored value does not match the original.
    """
    if not isinstance(dic, dict):
        raise ValueError("must provide a dictionary")
    if not isinstance(path, str):
        raise ValueError("path must be a string")
    if not isinstance(h5file, h5py._hl.files.File):
        raise ValueError("must be an open h5py file")
    for key, item in dic.items():
        # hdf5 dataset names must be strings
        key = str(key)
        if isinstance(item, list):
            item = np.array(item)
        # scalars and strings
        # (np.float was removed from numpy >= 1.24; plain `float` covers it)
        if isinstance(item, (np.int64, np.float64, str, float, np.float32, int)):
            h5file[path + key] = item
            # read back via [()]: Dataset.value was removed in h5py 3.x;
            # h5py 3 returns bytes for stored strings, so decode to compare
            stored = h5file[path + key][()]
            if isinstance(stored, bytes) and isinstance(item, str):
                stored = stored.decode('utf-8')
            if not stored == item:
                raise ValueError('The data representation in the HDF5 file does not match the original dict.')
        # numpy arrays
        elif isinstance(item, np.ndarray):
            try:
                h5file[path + key] = item
            except Exception:
                # e.g. object/unicode arrays: store as fixed-width byte strings
                item = np.array(item).astype('|S9')
                h5file[path + key] = item
            if not np.array_equal(h5file[path + key][()], item):
                raise ValueError('The data representation in the HDF5 file does not match the original dict.')
        # nested dictionaries -> sub-groups
        elif isinstance(item, dict):
            recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
        # other types cannot be saved and will result in an error
        else:
            raise ValueError('Cannot save %s type.' % type(item))
def recursively_load_dict_contents_from_group(h5file, path):
    """Recursively read the hdf5 group at `path` back into a nested dict."""
    ans = {}
    for key, item in h5file[path].items():
        if isinstance(item, h5py._hl.dataset.Dataset):
            # Dataset.value was removed in h5py 3.x; `dset[()]` is the
            # version-independent way to read the whole dataset
            ans[key] = item[()]
        elif isinstance(item, h5py._hl.group.Group):
            ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')
    return ans
def export_xpcs_results_to_h5( filename, export_dir, export_dict ):
    '''
    YG. May 10, 2017
    save the results to a h5 file
    filename: the h5 file name
    export_dir: the exported file folder
    export_dict: dict, with keys as md, g2, g4 et.al.

    Storage scheme: flat metadata-like dicts become attributes on a
    placeholder dataset; nested dicts become hdf5 groups; pandas objects
    are written via pandas' own hdf writer; everything else becomes a
    plain dataset.
    '''
    fout = export_dir + filename
    # flat dicts: stored as attributes of a 1-element placeholder dataset
    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p']
    # dicts of dicts: stored recursively as groups
    dict_nest=['taus_uids', 'g2_uids' ]
    with h5py.File(fout, 'w') as hf:
        for key in list(export_dict.keys()):
            if key in dicts: #=='md' or key == 'qval_dict':
                md= export_dict[key]
                meta_data = hf.create_dataset( key, (1,), dtype='i')
                for key_ in md.keys():
                    try:
                        meta_data.attrs[str(key_)] = md[key_]
                    except:
                        # values that cannot be hdf5 attributes are skipped
                        pass
            elif key in dict_nest:
                try:
                    recursively_save_dict_contents_to_group(hf, '/%s/'%key, export_dict[key] )
                except:
                    print("Can't export the key: %s in this dataset."%key)
            elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
                # NOTE(review): writes to `fout` via pandas while `hf` holds
                # the same file open -- verify this works with the installed
                # h5py/pytables versions
                export_dict[key].to_hdf( fout, key=key, mode='a', )
            else:
                data = hf.create_dataset(key, data = export_dict[key] )
                #add this fill line at Octo 27, 2017
                # NOTE(review): this assigns a plain Python attribute named
                # 'set_fill_value' on the dataset object; it is not a h5py
                # API call and has no effect on the file -- confirm intent
                data.set_fill_value = np.nan
    print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename))
def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclude_keys=None):
    '''
    YG. Dec 22, 2016
    Extract data from a h5 file (debug variant: dict-like keys are read
    back with recursively_load_dict_contents_from_group).

    filename: the h5 file name
    import_dir: the imported file folder
    onekey: string; if not None, only that key is extracted
    exclude_keys: optional list of top-level keys to skip (used only when
        onekey is None)
    return:
        extract_dict: dict, with keys as md, g2, g4 et.al.
    '''
    import pandas as pds
    import numpy as np
    extract_dict = {}
    fp = import_dir + filename
    pds_type_keys = []
    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids']
    if exclude_keys is None:
        exclude_keys = []
    if onekey is None:
        # full extraction: dict-like keys, pandas keys, plain arrays
        for k in dicts:
            extract_dict[k] = {}
        with h5py.File(fp, 'r') as hf:
            for key in list(hf.keys()):
                if key not in exclude_keys:
                    if key in dicts:
                        extract_dict[key] = recursively_load_dict_contents_from_group(hf, '/' + key + '/')
                    elif key in ['g2_fit_paras', 'g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
                        # pandas objects are read after the h5py handle closes
                        pds_type_keys.append(key)
                    else:
                        extract_dict[key] = np.array(hf.get(key))
        for key in pds_type_keys:
            if key not in exclude_keys:
                extract_dict[key] = pds.read_hdf(fp, key=key)
    else:
        if onekey == 'md':
            # fix: extract_dict['md'] was never initialized in this branch,
            # so the original raised KeyError on the first attribute
            extract_dict['md'] = {}
            with h5py.File(fp, 'r') as hf:
                md = hf.get('md')
                for key in list(md.attrs):
                    extract_dict['md'][key] = md.attrs[key]
        elif onekey in ['g2_fit_paras', 'g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
            extract_dict[onekey] = pds.read_hdf(fp, key=onekey)
        else:
            try:
                with h5py.File(fp, 'r') as hf:
                    extract_dict[onekey] = np.array(hf.get(onekey))
            except Exception:
                print("The %s doesn't have this %s value" % (fp, onekey))
    return extract_dict
def export_xpcs_results_to_h5_old( filename, export_dir, export_dict ):
    '''
    YG. Dec 22, 2016
    save the results to a h5 file
    filename: the h5 file name
    export_dir: the exported file folder
    export_dict: dict, with keys as md, g2, g4 et.al.

    Legacy version superseded by export_xpcs_results_to_h5; kept for
    reference only.
    '''
    import h5py
    fout = export_dir + filename
    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p'] #{k1: { }}
    dict_nest= ['taus_uids', 'g2_uids'] #{k1: {k2:}}
    with h5py.File(fout, 'w') as hf:
        for key in list(export_dict.keys()):
            if key in dicts: #=='md' or key == 'qval_dict':
                md= export_dict[key]
                meta_data = hf.create_dataset( key, (1,), dtype='i')
                for key_ in md.keys():
                    try:
                        meta_data.attrs[str(key_)] = md[key_]
                    except:
                        # values that cannot be hdf5 attributes are skipped
                        pass
            elif key in dict_nest:
                # NOTE(review): this branch looks broken -- `k1` is a dict
                # but is passed to create_dataset as the dataset *name*, and
                # the loop recreates `v2` without ever storing k1's values;
                # the replacement function handles nested dicts correctly
                k1 = export_dict[key]
                v1 = hf.create_dataset( key, (1,), dtype='i')
                for k2 in k1.keys():
                    v2 = hf.create_dataset( k1, (1,), dtype='i')
            elif key in ['g2_fit_paras','g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
                export_dict[key].to_hdf( fout, key=key, mode='a', )
            else:
                data = hf.create_dataset(key, data = export_dict[key] )
    print( 'The xpcs analysis results are exported to %s with filename as %s'%(export_dir , filename))
def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys=None):
    '''
    YG. Dec 22, 2016
    Extract data from a h5 file written by export_xpcs_results_to_h5.

    filename: the h5 file name
    import_dir: the imported file folder
    onekey: string; if not None, only that key is extracted
    exclude_keys: optional list of top-level keys to skip (used only when
        onekey is None)
    return:
        extract_dict: dict, with keys as md, g2, g4 et.al.
    '''
    import pandas as pds
    import numpy as np
    extract_dict = {}
    fp = import_dir + filename
    pds_type_keys = []
    dicts = ['md', 'qval_dict', 'qval_dict_v', 'qval_dict_p', 'taus_uids', 'g2_uids']
    if exclude_keys is None:
        exclude_keys = []
    if onekey is None:
        # full extraction: attribute-style dicts, pandas keys, plain arrays
        for k in dicts:
            extract_dict[k] = {}
        with h5py.File(fp, 'r') as hf:
            for key in list(hf.keys()):
                if key not in exclude_keys:
                    if key in dicts:
                        md = hf.get(key)
                        for key_ in list(md.attrs):
                            # qval_dict keys are q-ring indices stored as strings
                            if key == 'qval_dict':
                                extract_dict[key][int(key_)] = md.attrs[key_]
                            else:
                                extract_dict[key][key_] = md.attrs[key_]
                    elif key in ['g2_fit_paras', 'g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
                        # pandas objects are read after the h5py handle closes
                        pds_type_keys.append(key)
                    else:
                        extract_dict[key] = np.array(hf.get(key))
        for key in pds_type_keys:
            if key not in exclude_keys:
                extract_dict[key] = pds.read_hdf(fp, key=key)
    else:
        if onekey == 'md':
            # fix: extract_dict['md'] was never initialized in this branch,
            # so the original raised KeyError on the first attribute
            extract_dict['md'] = {}
            with h5py.File(fp, 'r') as hf:
                md = hf.get('md')
                for key in list(md.attrs):
                    extract_dict['md'][key] = md.attrs[key]
        elif onekey in ['g2_fit_paras', 'g2b_fit_paras', 'spec_km_pds', 'spec_pds', 'qr_1d_pds']:
            extract_dict[onekey] = pds.read_hdf(fp, key=onekey)
        else:
            try:
                with h5py.File(fp, 'r') as hf:
                    extract_dict[onekey] = np.array(hf.get(onekey))
            except Exception:
                print("The %s doesn't have this %s value" % (fp, onekey))
    return extract_dict
def read_contrast_from_multi_csv(uids, path, times=None, unit=20):
    '''Y.G. 2016, Dec 23, load contrast from multiple csv files.

    Reads path/<uid>/uid=<uid>--contrast_factorL.csv for each uid; the
    first csv column holds the q values, the second the contrast.

    Parameters
    ----------
    uids : sequence of uid strings
    path : str, root folder containing one sub-folder per uid
    times : optional array of exposure times; defaults to
        [0, 1, 2, 4, ...] * unit (length len(uids) + 1)
    unit : scale factor for the default times

    Returns
    -------
    times, contr : contr has shape (len(uids), n_q)
    '''
    N = len(uids)
    if times is None:
        times = np.array([0] + [2 ** i for i in range(N)]) * unit
    contr = None
    for i, uid in enumerate(uids):
        fp = path + uid + '/uid=%s--contrast_factorL.csv' % uid
        contri = pds.read_csv(fp)
        qs = np.array(contri[contri.columns[0]])
        contri_ = np.array(contri[contri.columns[1]])
        if i == 0:
            # allocate once the number of q points is known
            contr = np.zeros([N, len(qs)])
        contr[i] = contri_
    if contr is None:
        # fix: with an empty uid list the original raised NameError on return
        contr = np.zeros([N, 0])
    return times, contr
def read_contrast_from_multi_h5(uids, path):
    """Collect xsvs exposure times and contrast curves from per-uid h5 files.

    For each uid, reads 'times_xsvs' and 'contrast_factorL' from
    path/<uid>/<uid>_Res.h5 via extract_xpcs_results_from_h5 and stacks
    the first contrast column of every uid into one (n_uid, n_q) array.
    """
    n_uid = len(uids)
    times_xsvs = np.zeros(n_uid)
    for idx, uid in enumerate(uids):
        res_name = '%s_Res.h5' % uid
        uid_dir = path + uid + '/'
        t = extract_xpcs_results_from_h5(filename=res_name,
                                         import_dir=uid_dir, onekey='times_xsvs')
        times_xsvs[idx] = t['times_xsvs'][0]
        contri = extract_xpcs_results_from_h5(filename=res_name,
                                              import_dir=uid_dir, onekey='contrast_factorL')
        if idx == 0:
            contr = np.zeros([n_uid, contri['contrast_factorL'].shape[0]])
        contr[idx] = contri['contrast_factorL'][:, 0]
    return times_xsvs, contr
|
yugangzhang/chxanalys
|
chxanalys/Create_Report.py
|
Python
|
bsd-3-clause
| 62,451
|
[
"VisIt"
] |
1c5c2d7e6b1755822fb44a80667aa66b1ae004ada8ddccf83cdb9c0c8dc1c8f1
|
from pymatgen.core.structure import Structure
from connectivity_from_structure import get_connectivity_matrix, Polyhedra
from effective_coordination import EffectiveCoordFinder
import unittest
__author__ = 'Tina'
"""
Each unit test checks whether the connectivities between polyhedra as well as the coordination number match those in
the literature ("The Major Ternary Structural Families", O. Muller, R. Roy; "Transition Metal Oxides", Rao Raveau)
"""
class TestVariousStructures(unittest.TestCase):
def test_barite(self):
    """Coordination and connectivity for barite (BaSO4).

    Expected values follow the literature cited in the module docstring:
    Ba is 8-fold and S is 4-fold coordinated; SO4 tetrahedra are isolated.
    """
    barite_structure = Structure.from_file('test_structures/barite.cif', True, True)
    cn_finder = EffectiveCoordFinder(barite_structure)
    cns = cn_finder.get_avg_cn(anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Ba" or cation == "S",
                        "Ba and SO4 polyanions should be the only ions in barite")
        if cation == 'Ba':
            self.assertEqual(round(cns[cation]), 8, "Ba should be 8-fold coordinated")
        if cation == 'S':
            self.assertEqual(round(cns[cation]), 4, "S should be 4-fold coordinated")
    central_species = ['Ba', 'S']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(barite_structure, False, 3.0, peripheral_species, central_species)
    self.assertIn('Ba', connectivity_matrix.keys(), "Ba polyhedra not found in barite matrix")
    self.assertIn('S', connectivity_matrix.keys(), "SO4 polyanions not found in barite matrix")
    self.assertEqual(connectivity_matrix['Ba']['Ba']['point'], 4, "Ba should be point-sharing")
    # fix: message typo "edge--sharing"
    self.assertEqual(connectivity_matrix['Ba']['Ba']['edge'], 4, "Ba should be edge-sharing")
    # fix: message typo "should be not be"
    self.assertEqual(connectivity_matrix['Ba']['Ba']['face'], 0, "Ba should not be face-sharing")
    self.assertEqual(connectivity_matrix['S']['S']['point'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['edge'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['face'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['Ba']['S']['point'], 6, "Ba and S should be point-sharing")
    self.assertEqual(connectivity_matrix['Ba']['S']['edge'], 1, "Ba and S should be edge-sharing")
    self.assertEqual(connectivity_matrix['Ba']['S']['face'], 0, "Ba and S should not be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['Ba']['S'].keys():
        self.assertEqual(connectivity_matrix['Ba']['S'][connectivity_type]*barite_structure.composition['Ba'],
                         connectivity_matrix['S']['Ba'][connectivity_type]*barite_structure.composition['S'],
                         "Total number of sharing instances between 'Ba' and 'S' "
                         "should be same as total number of sharing "
                         "instances between 'S' and 'Ba'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_k2so4(self):
    """Coordination and connectivity for beta-K2SO4.

    K is 8-fold and S is 4-fold coordinated; SO4 tetrahedra are isolated.
    """
    k2so4_structure = Structure.from_file('test_structures/beta-K2SO4.cif', True, True)
    cn_finder = EffectiveCoordFinder(k2so4_structure)
    cns = cn_finder.get_avg_cn(radius=3.2, anions=['O'])
    for cation in cns:
        # fix: message named "Ba" but the cations here are K and S
        self.assertTrue(cation == "K" or cation == "S", "K and S should be only ions in beta-K2SO4")
        if cation == 'K':
            self.assertEqual(round(cns[cation]), 8, "K should be 8-fold coordinated")
        if cation == 'S':
            self.assertEqual(round(cns[cation]), 4, "S should be 4-fold coordinated")
    central_species = ['K', 'S']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(k2so4_structure, False, 3.2, peripheral_species, central_species)
    self.assertIn('K', connectivity_matrix.keys(), "K polyhedra not found in beta-K2SO4 matrix")
    self.assertIn('S', connectivity_matrix.keys(), "SO4 polyanions not found in beta-K2SO4 matrix")
    self.assertEqual(connectivity_matrix['K']['K']['point'], 6, "K should be point-sharing")
    self.assertEqual(connectivity_matrix['K']['K']['edge'], 4, "K should be edge-sharing")
    self.assertEqual(connectivity_matrix['K']['K']['face'], 6, "K should be face-sharing")
    self.assertEqual(connectivity_matrix['S']['S']['point'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['edge'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['face'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['K']['point'], 4, "S and K should be point-sharing")
    self.assertEqual(connectivity_matrix['S']['K']['edge'], 7, "S and K should be edge-sharing")
    self.assertEqual(connectivity_matrix['S']['K']['face'], 0, "S and K should not be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['K']['S'].keys():
        self.assertEqual(connectivity_matrix['K']['S'][connectivity_type]*k2so4_structure.composition['K'],
                         connectivity_matrix['S']['K'][connectivity_type]*k2so4_structure.composition['S'],
                         "Total number of sharing instances between 'K' and 'S' "
                         "should be same as total number of sharing "
                         "instances between 'S' and 'K'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_fluorite(self):
    """Testing coordination and connectivity matrix for fluorite structure CaF2"""
    structure = Structure.from_file('test_structures/fluorite.cif', True, True)
    finder = EffectiveCoordFinder(structure)
    cns = finder.get_avg_cn(radius=2.6, anions=['F'])
    # Ca is the lone cation and sits in the 8-fold fluorite site
    for cation, avg_cn in cns.items():
        self.assertEqual(cation, "Ca", "Ca should be the only ions in CaF2 fluorite")
        if cation == 'Ca':
            self.assertEqual(round(avg_cn), 8, "Ca should be 8-fold coordinated")
    matrix, polyhedra = \
        get_connectivity_matrix(structure, False, 2.6, ['F'], ['Ca'])
    self.assertIn('Ca', matrix.keys(), "Ca polyhedra not found in CaF2 fluorite matrix")
    # Ca cubes share all twelve edges and nothing else
    self.assertEqual(matrix['Ca']['Ca']['point'], 0, "Ca should not be point-sharing")
    self.assertEqual(matrix['Ca']['Ca']['edge'], 12, "Ca should be edge-sharing")
    self.assertEqual(matrix['Ca']['Ca']['face'], 0, "Ca should not be face-sharing")
    for poly in polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_cafe2o4(self):
    """Testing coordination and connectivity matrix for CaFe2O4 structure.

    Ca is nominally 8-fold (effective coordination can underestimate,
    hence assertLessEqual) and Fe is 6-fold coordinated.
    """
    cafe2o4_structure = Structure.from_file('test_structures/CaFe2O4.cif', True, True)
    cn_finder = EffectiveCoordFinder(cafe2o4_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Ca" or cation == "Fe",
                        "Ca and Fe should be the only cations in CaFe2O4 structure")
        if cation == 'Ca':
            self.assertLessEqual(round(cns[cation]), 8,
                                 "Ca should be 8-fold coordinated (effective coordination underestimates)")
        if cation == 'Fe':
            self.assertEqual(round(cns[cation]), 6, "Fe should be 6-fold coordinated")
    central_species = ['Ca', 'Fe']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(cafe2o4_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Ca', connectivity_matrix.keys(), "Ca cations not found in matrix")
    self.assertIn('Fe', connectivity_matrix.keys(), "Fe cations not found in matrix")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['point'], 2, "Ca should be point-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['edge'], 0, "Ca should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['face'], 2, "Ca should be face-sharing")
    self.assertEqual(connectivity_matrix['Fe']['Fe']['point'], 4, "Fe should be point-sharing")
    self.assertEqual(connectivity_matrix['Fe']['Fe']['edge'], 4, "Fe should be edge-sharing")
    self.assertEqual(connectivity_matrix['Fe']['Fe']['face'], 0, "Fe should not be face-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Fe']['point'], 8, "Fe and Ca should be point-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Fe']['edge'], 5, "Fe and Ca should be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Fe']['face'], 2, "Fe and Ca should be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['Ca']['Fe'].keys():
        self.assertEqual(connectivity_matrix['Ca']['Fe'][connectivity_type]*cafe2o4_structure.composition['Ca'],
                         connectivity_matrix['Fe']['Ca'][connectivity_type]*cafe2o4_structure.composition['Fe'],
                         "Total number of sharing instances between 'Ca' and 'Fe' "
                         "should be same as total number of sharing"
                         "instances between 'Fe' and 'Ca'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_calcite(self):
    """Coordination and connectivity for calcite (CaCO3).

    Ca is 6-fold and C is 3-fold coordinated; CO3 groups are isolated.
    """
    calcite_structure = Structure.from_file('test_structures/calcite.cif', True, True)
    cn_finder = EffectiveCoordFinder(calcite_structure)
    # NOTE(review): cn search uses radius 2.5 while the connectivity call
    # below uses 2.6 -- confirm the mismatch is intentional
    cns = cn_finder.get_avg_cn(radius=2.5, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Ca" or cation == "C", "Ca and C should be the only ions in CaCO3")
        if cation == 'Ca':
            # fix: message wrongly said "K should be 8-fold coordinated"
            self.assertEqual(round(cns[cation]), 6, "Ca should be 6-fold coordinated")
        if cation == 'C':
            # fix: message wrongly said "S should be 4-fold coordinated"
            self.assertEqual(round(cns[cation]), 3, "C should be 3-fold coordinated")
    central_species = ['Ca', 'C']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(calcite_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Ca', connectivity_matrix.keys(), "Ca polyhedra not found in CaCO3 calcite matrix")
    self.assertIn('C', connectivity_matrix.keys(), "CO3 polyanions not found in CaCO3 calcite matrix")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['point'], 6, "Ca should be point-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['edge'], 0, "Ca should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['face'], 0, "Ca should not be face-sharing")
    self.assertEqual(connectivity_matrix['C']['C']['point'], 0, "C should be isolated")
    self.assertEqual(connectivity_matrix['C']['C']['edge'], 0, "C should be isolated")
    self.assertEqual(connectivity_matrix['C']['C']['face'], 0, "C should be isolated")
    self.assertEqual(connectivity_matrix['Ca']['C']['point'], 6, "Ca and C should be point-sharing")
    self.assertEqual(connectivity_matrix['Ca']['C']['edge'], 0, "Ca and C should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['C']['face'], 0, "Ca and C should not be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['Ca']['C'].keys():
        self.assertEqual(connectivity_matrix['Ca']['C'][connectivity_type]*calcite_structure.composition['Ca'],
                         connectivity_matrix['C']['Ca'][connectivity_type]*calcite_structure.composition['C'],
                         "Total number of sharing instances between 'Ca' and 'C' "
                         "should be same as total number of sharing "
                         "instances between 'C' and 'Ca'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_corundum(self):
    """Testing coordination and connectivity matrix for corundum structure Cr2O3.

    Cr is 6-fold coordinated; CrO6 octahedra share corners, edges and
    one face, as in the corundum topology.
    """
    corundum_structure = Structure.from_file('test_structures/corundum.cif', True, True)
    cn_finder = EffectiveCoordFinder(corundum_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Cr", "Cr should be the only ions in corundum Cr2O3")
        if cation == 'Cr':
            self.assertEqual(round(cns[cation]), 6, "Cr should be 6-fold coordinated")
    central_species = ['Cr']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(corundum_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Cr', connectivity_matrix.keys(), "Cr polyhedra not found in corundum Cr2O3 matrix")
    self.assertEqual(connectivity_matrix['Cr']['Cr']['point'], 9, "Cr should be point-sharing")
    self.assertEqual(connectivity_matrix['Cr']['Cr']['edge'], 3, "Cr should be edge-sharing")
    self.assertEqual(connectivity_matrix['Cr']['Cr']['face'], 1, "Cr should be face-sharing")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_hexagonal(self):
    """Coordination and connectivity for hexagonal BaNiO3.

    Ba is 12-fold and Ni is 6-fold coordinated; NiO6 octahedra form
    face-sharing chains.
    """
    hexagonal_structure = Structure.from_file('test_structures/HexagonalABX3.cif', True, True)
    cn_finder = EffectiveCoordFinder(hexagonal_structure)
    cns = cn_finder.get_avg_cn(radius=3.2, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Ba" or cation == "Ni",
                        "Ba and Ni should be the only ions in hexagonal BaNiO3")
        if cation == 'Ba':
            self.assertEqual(round(cns[cation]), 12,
                             "Ba should be 12-fold coordinated (effective coordination underestimates)")
        if cation == 'Ni':
            # fix: message wrongly said "Si should be 6-fold coordinated"
            self.assertEqual(round(cns[cation]), 6, "Ni should be 6-fold coordinated")
    central_species = ['Ba', 'Ni']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(hexagonal_structure, False, 3.2, peripheral_species, central_species)
    self.assertIn('Ba', connectivity_matrix.keys(), "Ba cations not found in matrix")
    self.assertIn('Ni', connectivity_matrix.keys(), "Ni cations not found in matrix")
    self.assertEqual(connectivity_matrix['Ba']['Ba']['point'], 6, "Ba should be point-sharing")
    self.assertEqual(connectivity_matrix['Ba']['Ba']['edge'], 0, "Ba should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ba']['Ba']['face'], 8, "Ba should be face-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['point'], 0, "Ni should not be point-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['edge'], 0, "Ni should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['face'], 2, "Ni should only be face-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ba']['point'], 6, "Ni and Ba should be point-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ba']['edge'], 0, "Ni and Ba should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ba']['face'], 6, "Ni and Ba should be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['Ba']['Ni'].keys():
        self.assertEqual(connectivity_matrix['Ba']['Ni'][connectivity_type]*hexagonal_structure.composition['Ba'],
                         connectivity_matrix['Ni']['Ba'][connectivity_type]*hexagonal_structure.composition['Ni'],
                         "Total number of sharing instances between 'Ba' and 'Ni' "
                         "should be same as total number of sharing "
                         "instances between 'Ni' and 'Ba'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_k2nif4(self):
    """Coordination and connectivity for the K2NiF4 structure type.

    K is 9-fold coordinated (effective coordination may underestimate)
    and Ni is 6-fold coordinated.
    """
    k2nif4_structure = Structure.from_file('test_structures/K2NiF4.cif', True, True)
    cn_finder = EffectiveCoordFinder(k2nif4_structure)
    cns = cn_finder.get_avg_cn(radius=3.0, anions=['F'])
    for cation in cns:
        self.assertTrue(cation == "K" or cation == "Ni",
                        "K and Ni should be the only cations in K2NiF4 structure")
        if cation == 'K':
            # fix: message wrongly referred to "Zr"
            self.assertLessEqual(round(cns[cation]), 9,
                                 "K should be 9-fold coordinated (effective coordination underestimates)")
        if cation == 'Ni':
            self.assertEqual(round(cns[cation]), 6, "Ni should be 6-fold coordinated")
    central_species = ['K', 'Ni']
    peripheral_species = ['F']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(k2nif4_structure, False, 3.0, peripheral_species, central_species)
    self.assertIn('K', connectivity_matrix.keys(), "K cations not found in matrix")
    self.assertIn('Ni', connectivity_matrix.keys(), "Ni cations not found in matrix")
    self.assertEqual(connectivity_matrix['K']['K']['point'], 8, "K should be point-sharing")
    # fix: the next two messages contradicted the asserted nonzero counts
    self.assertEqual(connectivity_matrix['K']['K']['edge'], 4, "K should be edge-sharing")
    self.assertEqual(connectivity_matrix['K']['K']['face'], 5, "K should be face-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['point'], 4, "Ni should be point-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['edge'], 0, "Ni should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ni']['Ni']['face'], 0, "Ni should not be face-sharing")
    # fix: message said "should not be point-sharing" on a point==2 assert
    self.assertEqual(connectivity_matrix['Ni']['K']['point'], 2, "Ni and K should be point-sharing")
    self.assertEqual(connectivity_matrix['Ni']['K']['edge'], 0, "Ni and K should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ni']['K']['face'], 8, "Ni and K should be face-sharing")
    # sharing counts must balance when weighted by composition
    for connectivity_type in connectivity_matrix['K']['Ni'].keys():
        self.assertEqual(connectivity_matrix['K']['Ni'][connectivity_type]*k2nif4_structure.composition['K'],
                         connectivity_matrix['Ni']['K'][connectivity_type]*k2nif4_structure.composition['Ni'],
                         "Total number of sharing instances between 'K' and 'Ni' "
                         "should be same as total number of sharing "
                         "instances between 'Ni' and 'K'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_olivine(self):
    """Testing coordination and connectivity matrix for olivine structure Fe2SiO4.

    Checks effective coordination numbers (Fe: 6, Si: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    olivine_structure = Structure.from_file('test_structures/olivine.cif', True, True)
    cn_finder = EffectiveCoordFinder(olivine_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Fe" or cation == "Si",
                        "Fe and Si cations should be the only cations in olivine Fe2SiO4")
        if cation == 'Fe':
            self.assertEqual(round(cns[cation]), 6, "Fe should be 6-fold coordinated")
        if cation == 'Si':
            self.assertEqual(round(cns[cation]), 4, "Si should be 4-fold coordinated")
    central_species = ['Fe', 'Si']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(olivine_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Fe', connectivity_matrix.keys(), "Fe polyhedra not found in matrix")
    self.assertIn('Si', connectivity_matrix.keys(), "Si polyhedra not found in matrix")
    self.assertEqual(connectivity_matrix['Fe']['Fe']['point'], 6, "Fe should be point-sharing")
    self.assertEqual(connectivity_matrix['Fe']['Fe']['edge'], 3, "Fe should be edge-sharing")
    # fixed message: asserted value is 0, so Fe polyhedra do NOT share faces
    self.assertEqual(connectivity_matrix['Fe']['Fe']['face'], 0, "Fe should not be face-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['point'], 0, "Si should be isolated")
    self.assertEqual(connectivity_matrix['Si']['Si']['edge'], 0, "Si should be isolated")
    self.assertEqual(connectivity_matrix['Si']['Si']['face'], 0, "Si should be isolated")
    self.assertEqual(connectivity_matrix['Si']['Fe']['point'], 6, "Fe and Si should be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Fe']['edge'], 3, "Fe and Si should be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Fe']['face'], 0, "Fe and Si should not be face-sharing")
    # BUG FIX: the mass-balance check must scale by the species present in
    # this structure (Fe and Si) -- the original used 'K' and 'S', copied
    # from the K2NiF4 test, which would raise a KeyError or compare zeros.
    for connectivity_type in connectivity_matrix['Fe']['Si'].keys():
        self.assertEqual(
            connectivity_matrix['Fe']['Si'][connectivity_type] * olivine_structure.composition['Fe'],
            connectivity_matrix['Si']['Fe'][connectivity_type] * olivine_structure.composition['Si'],
            "Total number of sharing instances between 'Fe' and 'Si' "
            "should be same as total number of sharing "
            "instances between 'Si' and 'Fe'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_phenacite(self):
    """Testing coordination and connectivity matrix for phenacite structure Be2SiO4.

    Checks effective coordination numbers (Be: 4, Si: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    phenacite_structure = Structure.from_file('test_structures/phenacite.cif', True, True)
    cn_finder = EffectiveCoordFinder(phenacite_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Be" or cation == "Si",
                        "Be and Si polyanions should be the only ions in phenacite Be2SiO4")
        if cation == 'Be':
            self.assertEqual(round(cns[cation]), 4, "Be should be 4-fold coordinated")
        if cation == 'Si':
            self.assertEqual(round(cns[cation]), 4, "Si should be 4-fold coordinated")
    central_species = ['Be', 'Si']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(phenacite_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Be', connectivity_matrix.keys(), "Be cations not found in matrix")
    self.assertIn('Si', connectivity_matrix.keys(), "Si cations not found in matrix")
    self.assertEqual(connectivity_matrix['Be']['Be']['point'], 4, "Be should be point-sharing")
    self.assertEqual(connectivity_matrix['Be']['Be']['edge'], 0, "Be should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Be']['Be']['face'], 0, "Be should not be face-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['point'], 0, "Si should be isolated")
    self.assertEqual(connectivity_matrix['Si']['Si']['edge'], 0, "Si should be isolated")
    self.assertEqual(connectivity_matrix['Si']['Si']['face'], 0, "Si should be isolated")
    # fixed messages: this test pairs Be with Si, not with 'S'
    self.assertEqual(connectivity_matrix['Be']['Si']['point'], 4, "Be and Si should be point-sharing")
    self.assertEqual(connectivity_matrix['Be']['Si']['edge'], 0, "Be and Si should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Be']['Si']['face'], 0, "Be and Si should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Be']['Si'].keys():
        self.assertEqual(
            connectivity_matrix['Be']['Si'][connectivity_type] * phenacite_structure.composition['Be'],
            connectivity_matrix['Si']['Be'][connectivity_type] * phenacite_structure.composition['Si'],
            "Total number of sharing instances between 'Be' and 'Si' "
            "should be same as total number of sharing "
            "instances between 'Si' and 'Be'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_pyroxene(self):
    """Testing coordination and connectivity matrix for pyroxene structure Mg16Si16O48.

    Checks effective coordination numbers (Mg: <=6, Si: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    pyroxene_structure = Structure.from_file('test_structures/pyroxene.cif', True, True)
    cn_finder = EffectiveCoordFinder(pyroxene_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Mg" or cation == "Si", "Mg and Si should be the only ions in pyroxene MgSiO4")
        if cation == 'Mg':
            self.assertLessEqual(round(cns[cation]), 6,
                                 "Mg should be 6-fold coordinated (effective coordination underestimates)")
        if cation == 'Si':
            self.assertEqual(round(cns[cation]), 4, "Si should be 4-fold coordinated")
    central_species = ['Mg', 'Si']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(pyroxene_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Mg', connectivity_matrix.keys(), "Mg cations not found in matrix")
    self.assertIn('Si', connectivity_matrix.keys(), "Si cations not found in matrix")
    # fixed messages: aligned with the asserted values, and this test uses
    # Si, not 'Ti' (messages were copied from another test)
    self.assertEqual(connectivity_matrix['Mg']['Mg']['point'], 0, "Mg should not be point-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Mg']['edge'], 4, "Mg should be edge-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Mg']['face'], 0, "Mg should not be face-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['point'], 2, "Si should be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['edge'], 0, "Si should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['face'], 0, "Si should not be face-sharing")
    self.assertEqual(connectivity_matrix['Si']['Mg']['point'], 6, "Si and Mg should be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Mg']['edge'], 0.5, "Si and Mg should be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Mg']['face'], 0, "Si and Mg should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Mg']['Si'].keys():
        self.assertEqual(
            connectivity_matrix['Mg']['Si'][connectivity_type] * pyroxene_structure.composition['Mg'],
            connectivity_matrix['Si']['Mg'][connectivity_type] * pyroxene_structure.composition['Si'],
            "Total number of sharing instances between 'Mg' and 'Si' "
            "should be same as total number of sharing "
            "instances between 'Si' and 'Mg'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_reo3(self):
    """Testing coordination and connectivity matrix for ReO3 structure"""
    # Load the test structure and compute effective coordination numbers
    # for all cations, using O as the anion within a 2.6 A cutoff.
    reo3_structure = Structure.from_file('test_structures/ReO3.cif', True, True)
    cn_finder = EffectiveCoordFinder(reo3_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Re", "Re should be the only ions in ReO3 structure")
        if cation == 'Re':
            self.assertEqual(round(cns[cation]), 6, "Re should be 6-fold coordinated")
    # Corner-sharing ReO6 octahedra: expect 6 point-sharing neighbors,
    # no shared edges or faces.
    central_species = ['Re']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(reo3_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Re', connectivity_matrix.keys(), "Re polyhedra not found in ReO3 matrix")
    self.assertEqual(connectivity_matrix['Re']['Re']['point'], 6, "Re should be point-sharing")
    self.assertEqual(connectivity_matrix['Re']['Re']['edge'], 0, "Re should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Re']['Re']['face'], 0, "Re should not be face-sharing")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_rocksalt(self):
    """Testing coordination and connectivity matrix for rock salt structure NaCl.

    Checks effective coordination (Na: 6) and the connectivity matrix of
    NaCl6 octahedra (point- and edge-sharing, no face-sharing).
    """
    rocksalt_structure = Structure.from_file('test_structures/rocksalt.cif', True, True)
    cn_finder = EffectiveCoordFinder(rocksalt_structure)
    cns = cn_finder.get_avg_cn(radius=3.0, anions=['Cl'])
    for cation in cns:
        self.assertTrue(cation == "Na", "Na should be the only ions in rocksalt NaCl structure")
        if cation == 'Na':
            self.assertEqual(round(cns[cation]), 6, "Na should be 6-fold coordinated")
    central_species = ['Na']
    peripheral_species = ['Cl']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(rocksalt_structure, False, 3.0, peripheral_species, central_species)
    self.assertIn('Na', connectivity_matrix.keys(), "Na polyhedra not found in NaCl matrix")
    # fixed message: the asserted value is 6, so Na IS point-sharing
    self.assertEqual(connectivity_matrix['Na']['Na']['point'], 6, "Na should be point-sharing")
    self.assertEqual(connectivity_matrix['Na']['Na']['edge'], 12, "Na should be edge-sharing")
    self.assertEqual(connectivity_matrix['Na']['Na']['face'], 0, "Na should not be face-sharing")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_rutile(self):
    """Testing coordination and connectivity matrix for rutile structure TiO2.

    Checks effective coordination (Ti: 6) and the connectivity matrix of
    TiO6 octahedra (point- and edge-sharing, no face-sharing).
    """
    rutile_structure = Structure.from_file('test_structures/rutile.cif', True, True)
    cn_finder = EffectiveCoordFinder(rutile_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        # fixed message: this is the rutile TiO2 test, not rocksalt NaCl
        self.assertTrue(cation == "Ti", "Ti should be the only ions in rutile TiO2 structure")
        if cation == 'Ti':
            self.assertEqual(round(cns[cation]), 6, "Ti should be 6-fold coordinated")
    central_species = ['Ti']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(rutile_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Ti', connectivity_matrix.keys(), "Ti polyhedra not found in TiO2 matrix")
    # fixed message: the asserted value is 4, so Ti IS point-sharing
    self.assertEqual(connectivity_matrix['Ti']['Ti']['point'], 4, "Ti should be point-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Ti']['edge'], 4, "Ti should be edge-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Ti']['face'], 0, "Ti should not be face-sharing")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_scheelite(self):
    """Testing coordination and connectivity matrix for scheelite structure CaWO4.

    Checks effective coordination numbers (Ca: 8, W: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    scheelite_structure = Structure.from_file('test_structures/scheelite.cif', True, True)
    cn_finder = EffectiveCoordFinder(scheelite_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Ca" or cation == "W", "Ca and W should be the only ions in scheelite CaWO4")
        if cation == 'Ca':
            self.assertEqual(round(cns[cation]), 8,
                             "Ca should be 8-fold coordinated (effective coordination underestimates)")
        if cation == 'W':
            self.assertEqual(round(cns[cation]), 4, "W should be 4-fold coordinated")
    central_species = ['Ca', 'W']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(scheelite_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Ca', connectivity_matrix.keys(), "Ca cations not found in matrix")
    self.assertIn('W', connectivity_matrix.keys(), "W cations not found in matrix")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['point'], 0, "Ca should not be point-sharing")
    # fixed message typo: "should be be" -> "should be"
    self.assertEqual(connectivity_matrix['Ca']['Ca']['edge'], 4, "Ca should be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['Ca']['face'], 0, "Ca should not be face-sharing")
    self.assertEqual(connectivity_matrix['W']['W']['point'], 0, "W should not be point-sharing")
    self.assertEqual(connectivity_matrix['W']['W']['edge'], 0, "W should not be edge-sharing")
    self.assertEqual(connectivity_matrix['W']['W']['face'], 0, "W should not be face-sharing")
    self.assertEqual(connectivity_matrix['Ca']['W']['point'], 8, "Ca and W should only be point-sharing")
    self.assertEqual(connectivity_matrix['Ca']['W']['edge'], 0, "Ca and W should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ca']['W']['face'], 0, "Ca and W should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Ca']['W'].keys():
        self.assertEqual(
            connectivity_matrix['Ca']['W'][connectivity_type] * scheelite_structure.composition['Ca'],
            connectivity_matrix['W']['Ca'][connectivity_type] * scheelite_structure.composition['W'],
            "Total number of sharing instances between 'Ca' and 'W' "
            "should be same as total number of sharing "
            "instances between 'W' and 'Ca'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_sio2(self):
    """Testing coordination and connectivity matrix for SiO2 structure.

    Checks effective coordination (Si: 4) and the connectivity matrix of
    corner-sharing SiO4 tetrahedra.
    """
    sio2_structure = Structure.from_file('test_structures/SiO2.cif', True, True)
    cn_finder = EffectiveCoordFinder(sio2_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Si", "Si should be the only ions in SiO2 structure")
        if cation == 'Si':
            # fixed message: the asserted coordination number is 4, not 6
            self.assertEqual(round(cns[cation]), 4, "Si should be 4-fold coordinated")
    central_species = ['Si']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(sio2_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Si', connectivity_matrix.keys(), "Si polyhedra not found in SiO2 matrix")
    # fixed messages: SiO4 tetrahedra share corners (points), not edges
    self.assertEqual(connectivity_matrix['Si']['Si']['point'], 4, "Si should be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['edge'], 0, "Si should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['face'], 0, "Si should not be face-sharing")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_spinel(self):
    """Testing coordination and connectivity matrix for spinel structure MgAl2O4.

    Checks effective coordination numbers (Mg: 4, Al: 6) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    spinel_structure = Structure.from_file('test_structures/spinel.cif', True, True)
    cn_finder = EffectiveCoordFinder(spinel_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Mg" or cation == "Al",
                        "Mg and Al polyanions should be the only ions in spinel MgAl2O4")
        if cation == 'Mg':
            self.assertEqual(round(cns[cation]), 4, "Mg should be 4-fold coordinated")
        if cation == 'Al':
            # fixed message: the asserted coordination number is 6, not 4
            self.assertEqual(round(cns[cation]), 6, "Al should be 6-fold coordinated")
    central_species = ['Mg', 'Al']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(spinel_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Mg', connectivity_matrix.keys(), "Mg cations not found in matrix")
    self.assertIn('Al', connectivity_matrix.keys(), "Al cations not found in matrix")
    # fixed messages: aligned with the asserted values (Mg tetrahedra are
    # isolated from each other; Al octahedra share edges)
    self.assertEqual(connectivity_matrix['Mg']['Mg']['point'], 0, "Mg should not be point-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Mg']['edge'], 0, "Mg should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Mg']['face'], 0, "Mg should not be face-sharing")
    self.assertEqual(connectivity_matrix['Al']['Al']['point'], 0, "Al should not be point-sharing")
    self.assertEqual(connectivity_matrix['Al']['Al']['edge'], 6, "Al should be edge-sharing")
    self.assertEqual(connectivity_matrix['Al']['Al']['face'], 0, "Al should not be face-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Al']['point'], 12, "Mg and Al should be point-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Al']['edge'], 0, "Mg and Al should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Mg']['Al']['face'], 0, "Mg and Al should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Mg']['Al'].keys():
        self.assertEqual(
            connectivity_matrix['Mg']['Al'][connectivity_type] * spinel_structure.composition['Mg'],
            connectivity_matrix['Al']['Mg'][connectivity_type] * spinel_structure.composition['Al'],
            "Total number of sharing instances between 'Mg' and 'Al' "
            "should be same as total number of sharing "
            "instances between 'Al' and 'Mg'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_perovskite(self):
    """Testing coordination and connectivity matrix for perovskite structure SrTiO3.

    Checks effective coordination numbers (Sr: <=12, Ti: 6) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    perovskite_structure = Structure.from_file('test_structures/perovskite.cif', True, True)
    cn_finder = EffectiveCoordFinder(perovskite_structure)
    cns = cn_finder.get_avg_cn(radius=3.2, anions=['O'])
    for cation in cns:
        # fixed message: copied text referenced phenacite Be2SiO4
        self.assertTrue(cation == "Sr" or cation == "Ti",
                        "Sr and Ti should be the only cations in perovskite SrTiO3")
        if cation == 'Sr':
            self.assertLessEqual(round(cns[cation]), 12,
                                 "Sr should be 12-fold coordinated (effective coordination underestimates)")
        if cation == 'Ti':
            self.assertEqual(round(cns[cation]), 6, "Ti should be 6-fold coordinated")
    central_species = ['Sr', 'Ti']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(perovskite_structure, False, 3.2, peripheral_species, central_species)
    self.assertIn('Sr', connectivity_matrix.keys(), "Sr cations not found in matrix")
    self.assertIn('Ti', connectivity_matrix.keys(), "Ti cations not found in matrix")
    self.assertEqual(connectivity_matrix['Sr']['Sr']['point'], 12, "Sr should be point-sharing")
    self.assertEqual(connectivity_matrix['Sr']['Sr']['edge'], 0, "Sr should not be edge-sharing")
    # fixed message: the asserted value is 6, so Sr IS face-sharing
    self.assertEqual(connectivity_matrix['Sr']['Sr']['face'], 6, "Sr should be face-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Ti']['point'], 6, "Ti should be point-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Ti']['edge'], 0, "Ti should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Ti']['face'], 0, "Ti should not be face-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Sr']['point'], 0, "Sr and Ti should not be point-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Sr']['edge'], 0, "Sr and Ti should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Ti']['Sr']['face'], 8, "Sr and Ti should only be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Sr']['Ti'].keys():
        self.assertEqual(
            connectivity_matrix['Sr']['Ti'][connectivity_type] * perovskite_structure.composition['Sr'],
            connectivity_matrix['Ti']['Sr'][connectivity_type] * perovskite_structure.composition['Ti'],
            "Total number of sharing instances between 'Sr' and 'Ti' "
            "should be same as total number of sharing "
            "instances between 'Ti' and 'Sr'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_zircon(self):
    """Testing coordination and connectivity matrix for zircon structure ZrSiO4.

    Checks effective coordination numbers (Zr: <=8, Si: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    zircon_structure = Structure.from_file('test_structures/zircon.cif', True, True)
    cn_finder = EffectiveCoordFinder(zircon_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        self.assertTrue(cation == "Zr" or cation == "Si",
                        "Zr and Si polyanions should be the only ions in zircon ZrSiO4")
        if cation == 'Zr':
            self.assertLessEqual(round(cns[cation]), 8,
                                 "Zr should be 8-fold coordinated (effective coordination underestimates)")
        if cation == 'Si':
            self.assertEqual(round(cns[cation]), 4, "Si should be 4-fold coordinated")
    central_species = ['Zr', 'Si']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(zircon_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Zr', connectivity_matrix.keys(), "Zr cations not found in matrix")
    self.assertIn('Si', connectivity_matrix.keys(), "Si cations not found in matrix")
    self.assertEqual(connectivity_matrix['Zr']['Zr']['point'], 0, "Zr should not be point-sharing")
    self.assertEqual(connectivity_matrix['Zr']['Zr']['edge'], 4, "Zr should be edge-sharing")
    self.assertEqual(connectivity_matrix['Zr']['Zr']['face'], 0, "Zr should not be face-sharing")
    # fixed message: the asserted value is 0, so Si is NOT point-sharing
    self.assertEqual(connectivity_matrix['Si']['Si']['point'], 0, "Si should not be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['edge'], 0, "Si should not be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Si']['face'], 0, "Si should not be face-sharing")
    self.assertEqual(connectivity_matrix['Si']['Zr']['point'], 4, "Zr and Si should be point-sharing")
    self.assertEqual(connectivity_matrix['Si']['Zr']['edge'], 2, "Zr and Si should be edge-sharing")
    self.assertEqual(connectivity_matrix['Si']['Zr']['face'], 0, "Zr and Si should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Zr']['Si'].keys():
        self.assertEqual(
            connectivity_matrix['Zr']['Si'][connectivity_type] * zircon_structure.composition['Zr'],
            connectivity_matrix['Si']['Zr'][connectivity_type] * zircon_structure.composition['Si'],
            "Total number of sharing instances between 'Zr' and 'Si' "
            "should be same as total number of sharing "
            "instances between 'Si' and 'Zr'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
def test_znso4(self):
    """Testing coordination and connectivity matrix for ZnSO4 structure.

    Checks effective coordination numbers (Zn: <=6, S: 4) and the
    polyhedral connectivity matrix (point/edge/face sharing counts).
    """
    znso4_structure = Structure.from_file('test_structures/ZnSO4.cif', True, True)
    cn_finder = EffectiveCoordFinder(znso4_structure)
    cns = cn_finder.get_avg_cn(radius=2.6, anions=['O'])
    for cation in cns:
        # fixed message: copied text referenced zircon ZrSiO4
        self.assertTrue(cation == "Zn" or cation == "S", "Zn and S should be the only ions in ZnSO4")
        if cation == 'Zn':
            self.assertLessEqual(round(cns[cation]), 6,
                                 "Zn should be 6-fold coordinated (effective coordination underestimates)")
        if cation == 'S':
            self.assertEqual(round(cns[cation]), 4, "S should be 4-fold coordinated")
    central_species = ['Zn', 'S']
    peripheral_species = ['O']
    connectivity_matrix, connectivity_polyhedra = \
        get_connectivity_matrix(znso4_structure, False, 2.6, peripheral_species, central_species)
    self.assertIn('Zn', connectivity_matrix.keys(), "Zn cations not found in matrix")
    self.assertIn('S', connectivity_matrix.keys(), "S cations not found in matrix")
    self.assertEqual(connectivity_matrix['Zn']['Zn']['point'], 0, "Zn should not be point-sharing")
    self.assertEqual(connectivity_matrix['Zn']['Zn']['edge'], 2, "Zn should be edge-sharing")
    self.assertEqual(connectivity_matrix['Zn']['Zn']['face'], 0, "Zn should not be face-sharing")
    self.assertEqual(connectivity_matrix['S']['S']['point'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['edge'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['S']['face'], 0, "S should be isolated")
    self.assertEqual(connectivity_matrix['S']['Zn']['point'], 6, "S and Zn should only be point-sharing")
    self.assertEqual(connectivity_matrix['S']['Zn']['edge'], 0, "S and Zn should not be edge-sharing")
    # fixed message: the asserted value is 0, so S and Zn are NOT face-sharing
    self.assertEqual(connectivity_matrix['S']['Zn']['face'], 0, "S and Zn should not be face-sharing")
    # mass balance: per-polyhedron counts scaled by composition must agree
    for connectivity_type in connectivity_matrix['Zn']['S'].keys():
        self.assertEqual(
            connectivity_matrix['Zn']['S'][connectivity_type] * znso4_structure.composition['Zn'],
            connectivity_matrix['S']['Zn'][connectivity_type] * znso4_structure.composition['S'],
            "Total number of sharing instances between 'Zn' and 'S' "
            "should be same as total number of sharing "
            "instances between 'S' and 'Zn'")
    for poly in connectivity_polyhedra:
        self.assertIsInstance(poly, Polyhedra, "List of polyhedra includes a non-polyhedra element")
if __name__ == '__main__':
    # Discover and run all test methods in this module.
    unittest.main()
|
tchen0965/structural_descriptors_repo
|
examples/structures_test.py
|
Python
|
mit
| 47,836
|
[
"pymatgen"
] |
3fc4c00a40157de745bb0692c4dcb5e7489d561d5e2cd1a1ae8a2157875585f1
|
# -*- coding: utf-8 -*-
import time
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
from loguru import logger
from sqlalchemy import Column, Date, DateTime, Integer, String, Table, Time, Unicode, and_, or_
from sqlalchemy.orm import relation
from sqlalchemy.schema import ForeignKey
from flexget import db_schema, plugin
from flexget.manager import Session
from flexget.terminal import console
from flexget.utils import requests
from flexget.utils.database import json_synonym
from flexget.utils.tools import split_title_year
# Versioned declarative bases: API cache tables (schema v7) and auth tables (v0).
Base = db_schema.versioned_base('api_trakt', 7)
AuthBase = db_schema.versioned_base('trakt_auth', 0)
# Bind the module name so log lines are attributable to this plugin.
logger = logger.bind(name='api_trakt')
# Production Site
# OAuth application credentials registered with trakt.tv for this client.
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api.trakt.tv/'
PIN_URL = 'https://trakt.tv/pin/346'
# Oauth account authentication
class TraktUserAuth(AuthBase):
    """Stored OAuth credentials for one named trakt.tv account."""

    __tablename__ = 'trakt_user_auth'

    # Arbitrary user-chosen account name; one credential row per account.
    account = Column(Unicode, primary_key=True)
    access_token = Column(Unicode)
    refresh_token = Column(Unicode)
    created = Column(DateTime)
    expires = Column(DateTime)

    def __init__(self, account, access_token, refresh_token, created, expires):
        """Store tokens, converting trakt's raw values to datetimes.

        :param created: POSIX timestamp of token creation.
        :param expires: token lifetime in seconds (converted to an
            absolute expiry datetime via ``token_expire_date``).
        """
        self.account = account
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.expires = token_expire_date(expires)
        self.created = token_created_date(created)
def token_expire_date(expires):
    """Convert a token lifetime in seconds into an absolute expiry datetime."""
    lifetime = timedelta(seconds=expires)
    return datetime.now() + lifetime
def token_created_date(created):
    """Convert a POSIX timestamp into a (naive, local-time) datetime."""
    return datetime.fromtimestamp(created)
def device_auth():
    """Authorize this client via trakt's OAuth device-code flow.

    Displays a user code for the user to enter at trakt.tv, then polls the
    token endpoint until the user approves, the code expires, or an error
    occurs.

    :returns: the token response dict on success (contains
        ``access_token``, ``refresh_token``, ``created_at``, ``expires_in``).
    :raises plugin.PluginError: on denial, expiry, invalid code, or
        network failure.
    """
    data = {'client_id': CLIENT_ID}
    try:
        # Step 1: request a device code / user code pair.
        r = requests.post(get_api_url('oauth/device/code'), data=data).json()
        device_code = r['device_code']
        user_code = r['user_code']
        expires_in = r['expires_in']
        interval = r['interval']
        console(
            'Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in '
            '{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0)
        )
        logger.debug('Polling for user authorization.')
        # Step 2: poll the token endpoint with the device code until the
        # user acts or the code expires.
        data['code'] = device_code
        data['client_secret'] = CLIENT_SECRET
        end_time = time.time() + expires_in
        console('Waiting...', end='')
        # stop polling after expires_in seconds
        while time.time() < end_time:
            time.sleep(interval)
            polling_request = requests.post(
                get_api_url('oauth/device/token'), data=data, raise_status=False
            )
            if polling_request.status_code == 200:  # success
                return polling_request.json()
            elif polling_request.status_code == 400:  # pending -- waiting for user
                console('...', end='')
            elif polling_request.status_code == 404:  # not found -- invalid device_code
                raise plugin.PluginError('Invalid device code. Open an issue on Github.')
            elif polling_request.status_code == 409:  # already used -- user already approved
                raise plugin.PluginError('User code has already been approved.')
            elif polling_request.status_code == 410:  # expired -- restart process
                break
            elif polling_request.status_code == 418:  # denied -- user denied code
                raise plugin.PluginError('User code has been denied.')
            elif polling_request.status_code == 429:  # polling too fast
                logger.warning('Polling too quickly. Upping the interval. No action required.')
                interval += 1
        # Loop fell through (timeout) or broke on 410: code is no longer valid.
        raise plugin.PluginError('User code has expired. Please try again.')
    except requests.RequestException as e:
        raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e))
def token_oauth(data):
    """POST ``data`` to trakt's token endpoint and return the parsed JSON.

    :raises plugin.PluginError: if the request fails.
    """
    try:
        return requests.post(get_api_url('oauth/token'), data=data).json()
    except requests.RequestException as e:
        raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def delete_account(account):
    """Remove the stored OAuth credentials for ``account`` from the database.

    :raises plugin.PluginError: if no such account is stored.
    """
    with Session() as session:
        acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
        if not acc:
            raise plugin.PluginError('Account %s not found.' % account)
        session.delete(acc)
def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False):
    """
    Gets authorization info from a pin or refresh token.

    :param account: Arbitrary account name to attach authorization to.
    :param unicode token: The pin or refresh token, as supplied by the trakt website.
    :param bool refresh: If True, refresh the access token using refresh_token from db.
    :param bool re_auth: If True, account is re-authorized even if it already exists in db.
    :param bool called_from_cli: If True and the account is unknown, fall back to
        the interactive device-authorization flow instead of raising.
    :raises RequestException: If there is a network error while authorizing.
    """
    data = {
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
    }
    with Session() as session:
        acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
        # Fast path: stored token still valid and no forced refresh/re-auth.
        if acc and datetime.now() < acc.expires and not refresh and not re_auth:
            return acc.access_token
        else:
            # Refresh when explicitly requested, or proactively within
            # 5 days of expiry.
            if (
                acc
                and (refresh or datetime.now() >= acc.expires - timedelta(days=5))
                and not re_auth
            ):
                logger.debug('Using refresh token to re-authorize account {}.', account)
                data['refresh_token'] = acc.refresh_token
                data['grant_type'] = 'refresh_token'
                token_dict = token_oauth(data)
            elif token:
                # We are only in here if a pin was specified, so it's safe to use console instead of logging
                console(
                    'Warning: PIN authorization has been deprecated. Use Device Authorization instead.'
                )
                data['code'] = token
                data['grant_type'] = 'authorization_code'
                token_dict = token_oauth(data)
            elif called_from_cli:
                logger.debug(
                    'No pin specified for an unknown account {}. Attempting to authorize device.',
                    account,
                )
                token_dict = device_auth()
            else:
                raise plugin.PluginError(
                    'Account %s has not been authorized. See `flexget trakt auth -h` on how to.'
                    % account
                )
            # Persist (insert or update) the credentials for this account.
            # NOTE(review): this except clause catches RequestException, but the
            # statements inside do not perform requests -- presumably dead code;
            # a missing 'refresh_token' key would raise KeyError instead. Confirm.
            try:
                new_acc = TraktUserAuth(
                    account,
                    token_dict['access_token'],
                    token_dict['refresh_token'],
                    token_dict.get('created_at', time.time()),
                    token_dict['expires_in'],
                )
                session.merge(new_acc)
                return new_acc.access_token
            except requests.RequestException as e:
                raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e))
def make_list_slug(name):
    """Return the slug for use in url for given list name.

    Lowercases the name, drops characters trakt strips from urls, and
    substitutes '&' -> 'and' and spaces -> dashes.
    """
    # These characters are just stripped in the url
    drop_table = str.maketrans('', '', '!@#$%^*()[]{}/=?+\\|')
    slug = name.lower().translate(drop_table)
    # These characters get replaced
    return slug.replace('&', 'and').replace(' ', '-')
def get_session(account=None, token=None):
    """
    Creates a requests session ready to talk to trakt API with FlexGet's api key.
    Can also add user level authentication if `account` parameter is given.

    :param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
        authenticated for that account.
    :param token: Optional pin/refresh token forwarded to ``get_access_token``.
    """
    session = requests.Session()
    session.headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': CLIENT_ID,
    }
    if account:
        # CLEANUP: the original guarded this with a redundant
        # `... if account else None` inside the `if account:` branch.
        access_token = get_access_token(account, token)
        if access_token:
            session.headers.update({'Authorization': 'Bearer %s' % access_token})
    return session
def get_api_url(*endpoint):
    """
    Get the address of a trakt API endpoint.

    :param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
        Multiple parameters can also be specified instead of a single iterable.
    :returns: The absolute url to the specified API endpoint.
    """
    parts = endpoint
    # A single non-string argument is treated as an iterable of path pieces.
    if len(parts) == 1 and not isinstance(parts[0], str):
        parts = parts[0]
    # Stringify each piece (ids may be ints) and join into the path.
    return API_URL + '/'.join(str(piece) for piece in parts)
@db_schema.upgrade('api_trakt')
def upgrade(ver, session):
    """Schema upgrade hook: versions <= 6 cannot be migrated in place.

    Raising UpgradeImpossible makes FlexGet drop and recreate the tables
    (this is a cache, so the data is safely re-fetchable).
    """
    if ver is None or ver <= 6:
        raise db_schema.UpgradeImpossible
    return ver
def get_entry_ids(entry):
    """Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
    # First pass uses only already-populated fields; the second pass allows
    # lazy lookups. Stop at the first pass that yields anything.
    for lazy in (False, True):
        ids = {}
        # Only one trakt id is kept; movie wins over show, show over episode.
        for field in ('trakt_movie_id', 'trakt_show_id', 'trakt_episode_id'):
            if entry.get(field, eval_lazy=lazy):
                ids['trakt'] = entry[field]
                break
        for field, key in (
            ('tmdb_id', 'tmdb'),
            ('tvdb_id', 'tvdb'),
            ('imdb_id', 'imdb'),
            ('tvrage_id', 'tvrage'),
        ):
            if entry.get(field, eval_lazy=lazy):
                ids[key] = entry[field]
        if ids:
            return ids
    return ids
class TraktMovieTranslation(Base):
    """One language-specific set of text fields for a movie."""

    __tablename__ = 'trakt_movie_translations'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # language code of this translation, as reported by the trakt api
    language = Column(Unicode)
    overview = Column(Unicode)
    tagline = Column(Unicode)
    title = Column(Unicode)
    movie_id = Column(Integer, ForeignKey('trakt_movies.id'))
    def __init__(self, translation, session):
        """Create a row from a trakt API translation dict."""
        super().__init__()
        self.update(translation, session)
    def update(self, translation, session):
        """Copy every key of the API `translation` dict onto the same-named column."""
        for col in translation.keys():
            setattr(self, col, translation.get(col))
class TraktShowTranslation(Base):
    """One language-specific set of text fields for a show."""

    __tablename__ = 'trakt_show_translations'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # language code of this translation, as reported by the trakt api
    language = Column(Unicode)
    overview = Column(Unicode)
    title = Column(Unicode)
    show_id = Column(Integer, ForeignKey('trakt_shows.id'))
    def __init__(self, translation, session):
        """Create a row from a trakt API translation dict."""
        super().__init__()
        self.update(translation, session)
    def update(self, translation, session):
        """Copy every key of the API `translation` dict onto the same-named column."""
        for col in translation.keys():
            setattr(self, col, translation.get(col))
def get_translations(ident, style):
    """Fetch translation records for a trakt show or movie, reusing cached rows.

    :param ident: Trakt id of the media item.
    :param style: Either 'show' or 'movie'; selects both the API url and the db model.
    :return: List of translation records, or None (implicitly) if the trakt
        request fails.
    """
    url = get_api_url(style + 's', ident, 'translations')
    # Pick the model and its foreign-key column matching the media style.
    trakt_translation = TraktShowTranslation if style == 'show' else TraktMovieTranslation
    trakt_translation_id = getattr(trakt_translation, style + '_id')
    translations = []
    req_session = get_session()
    try:
        results = req_session.get(url, params={'extended': 'full'}).json()
        with Session() as session:
            for result in results:
                # Reuse an existing row for this (language, media id) pair if cached.
                translation = (
                    session.query(trakt_translation)
                    .filter(
                        and_(
                            trakt_translation.language == result.get('language'),
                            trakt_translation_id == ident,
                        )
                    )
                    .first()
                )
                if not translation:
                    translation = trakt_translation(result, session)
                translations.append(translation)
            return translations
    except requests.RequestException as e:
        logger.debug('Error adding translations to trakt id {}: {}', ident, e)
class TraktGenre(Base):
    """Genre tag; the genre name itself is the primary key."""

    __tablename__ = 'trakt_genres'
    name = Column(Unicode, primary_key=True)
# Association table linking shows to their genres (many-to-many).
show_genres_table = Table(
    'trakt_show_genres',
    Base.metadata,
    Column('show_id', Integer, ForeignKey('trakt_shows.id')),
    Column('genre_id', Unicode, ForeignKey('trakt_genres.name')),
)
Base.register_table(show_genres_table)
# Association table linking movies to their genres (many-to-many).
movie_genres_table = Table(
    'trakt_movie_genres',
    Base.metadata,
    Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
    Column('genre_id', Unicode, ForeignKey('trakt_genres.name')),
)
Base.register_table(movie_genres_table)
class TraktActor(Base):
    """Cached trakt data about a single person (actor)."""

    __tablename__ = 'trakt_actors'
    # trakt's person id, supplied by the API rather than auto-generated
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(Unicode)
    slug = Column(Unicode)
    tmdb = Column(Integer)
    imdb = Column(Unicode)
    biography = Column(Unicode)
    birthday = Column(Date)
    death = Column(Date)
    homepage = Column(Unicode)
    def __init__(self, actor, session):
        super().__init__()
        self.update(actor, session)
    def update(self, actor, session):
        """Update this row from a trakt API person dict.

        :raises Exception: If this row already belongs to a different person.
        """
        # Guard against accidentally overwriting a row with another person's data.
        if self.id and self.id != actor.get('ids').get('trakt'):
            raise Exception('Tried to update db actors with different actor data')
        elif not self.id:
            self.id = actor.get('ids').get('trakt')
        self.name = actor.get('name')
        ids = actor.get('ids')
        self.imdb = ids.get('imdb')
        self.slug = ids.get('slug')
        self.tmdb = ids.get('tmdb')
        self.biography = actor.get('biography')
        # Dates are parsed only when present so existing values are not clobbered with None.
        if actor.get('birthday'):
            self.birthday = dateutil_parse(actor.get('birthday'))
        if actor.get('death'):
            self.death = dateutil_parse(actor.get('death'))
        self.homepage = actor.get('homepage')
    def to_dict(self):
        """Return a minimal id/name mapping for this actor."""
        return {'name': self.name, 'trakt_id': self.id, 'imdb_id': self.imdb, 'tmdb_id': self.tmdb}
# Association table linking shows to their cast (many-to-many).
show_actors_table = Table(
    'trakt_show_actors',
    Base.metadata,
    Column('show_id', Integer, ForeignKey('trakt_shows.id')),
    Column('actors_id', Integer, ForeignKey('trakt_actors.id')),
)
Base.register_table(show_actors_table)
# Association table linking movies to their cast (many-to-many).
movie_actors_table = Table(
    'trakt_movie_actors',
    Base.metadata,
    Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
    Column('actors_id', Integer, ForeignKey('trakt_actors.id')),
)
Base.register_table(movie_actors_table)
def get_db_actors(ident, style):
    """Return cached TraktActor rows for a show/movie, fetching from trakt as needed.

    :param ident: Trakt id of the show/movie.
    :param style: Either 'show' or 'movie'.
    :return: List of TraktActor instances, or None when the trakt request fails.
    """
    actors = {}
    url = get_api_url(style + 's', ident, 'people')
    req_session = get_session()
    try:
        results = req_session.get(url, params={'extended': 'full'}).json()
        with Session() as session:
            # 'cast' may be absent from the response; treat that as an empty list
            # instead of crashing on iteration.
            for result in results.get('cast') or []:
                trakt_id = result.get('person').get('ids').get('trakt')
                # sometimes an actor can occur twice in the list by mistake. This check is to avoid this unlikely event
                if trakt_id in actors:
                    continue
                actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first()
                if not actor:
                    actor = TraktActor(result.get('person'), session)
                actors[trakt_id] = actor
            return list(actors.values())
    except requests.RequestException as e:
        # Bug fix: the message previously formatted only the exception into the
        # id placeholder, so the id was never logged. Log both id and error.
        logger.debug('Error searching for actors for trakt id {}: {}', ident, e)
        return
def get_translations_dict(translate, style):
res = {}
for lang in translate:
info = {'overview': lang.overview, 'title': lang.title}
if style == 'movie':
info['tagline'] = lang.tagline
res[lang.language] = info
return res
def list_actors(actors):
res = {}
for actor in actors:
info = {
'trakt_id': actor.id,
'name': actor.name,
'imdb_id': str(actor.imdb),
'trakt_slug': actor.slug,
'tmdb_id': str(actor.tmdb),
'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None,
'biography': actor.biography,
'homepage': actor.homepage,
'death': actor.death.strftime("%Y/%m/%d") if actor.death else None,
}
res[str(actor.id)] = info
return res
class TraktEpisode(Base):
    """Cached trakt data for a single episode of a show."""

    __tablename__ = 'trakt_episodes'
    # trakt's own episode id; supplied by the API rather than auto-generated
    id = Column(Integer, primary_key=True, autoincrement=False)
    tvdb_id = Column(Integer)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tvrage_id = Column(Unicode)
    title = Column(Unicode)
    season = Column(Integer)
    number = Column(Integer)
    # absolute episode number across all seasons
    number_abs = Column(Integer)
    overview = Column(Unicode)
    first_aired = Column(DateTime)
    # last modification time reported by trakt
    updated_at = Column(DateTime)
    # when this row was last refreshed locally
    cached_at = Column(DateTime)
    series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
    def __init__(self, trakt_episode, session):
        super().__init__()
        self.update(trakt_episode, session)
    def update(self, trakt_episode, session):
        """Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
        # Refuse to overwrite this row with data belonging to a different episode.
        if self.id and self.id != trakt_episode['ids']['trakt']:
            raise Exception('Tried to update db ep with different ep data')
        elif not self.id:
            self.id = trakt_episode['ids']['trakt']
        self.imdb_id = trakt_episode['ids']['imdb']
        self.tmdb_id = trakt_episode['ids']['tmdb']
        self.tvrage_id = trakt_episode['ids']['tvrage']
        self.tvdb_id = trakt_episode['ids']['tvdb']
        self.first_aired = None
        if trakt_episode.get('first_aired'):
            self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
        self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
        self.cached_at = datetime.now()
        for col in ['title', 'season', 'number', 'number_abs', 'overview']:
            setattr(self, col, trakt_episode.get(col))
    @property
    def expired(self):
        # TODO should episode have its own expiration function?
        return False
class TraktSeason(Base):
    """Cached trakt data for a single season of a show."""

    __tablename__ = 'trakt_seasons'
    # trakt's own season id; supplied by the API rather than auto-generated
    id = Column(Integer, primary_key=True, autoincrement=False)
    tvdb_id = Column(Integer)
    tmdb_id = Column(Integer)
    tvrage_id = Column(Unicode)
    title = Column(Unicode)
    number = Column(Integer)
    episode_count = Column(Integer)
    aired_episodes = Column(Integer)
    overview = Column(Unicode)
    first_aired = Column(DateTime)
    ratings = Column(Integer)
    votes = Column(Integer)
    # when this row was last refreshed locally
    cached_at = Column(DateTime)
    series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
    def __init__(self, trakt_season, session):
        super().__init__()
        self.update(trakt_season, session)
    def update(self, trakt_season, session):
        """Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
        # Refuse to overwrite this row with data belonging to a different season.
        if self.id and self.id != trakt_season['ids']['trakt']:
            raise Exception('Tried to update db season with different season data')
        elif not self.id:
            self.id = trakt_season['ids']['trakt']
        self.tmdb_id = trakt_season['ids']['tmdb']
        self.tvrage_id = trakt_season['ids']['tvrage']
        self.tvdb_id = trakt_season['ids']['tvdb']
        self.first_aired = None
        if trakt_season.get('first_aired'):
            self.first_aired = dateutil_parse(trakt_season['first_aired'], ignoretz=True)
        self.cached_at = datetime.now()
        for col in [
            'title',
            'number',
            'episode_count',
            'aired_episodes',
            'ratings',
            'votes',
            'overview',
        ]:
            setattr(self, col, trakt_season.get(col))
    @property
    def expired(self):
        # TODO should season have its own expiration function?
        return False
class TraktShow(Base):
    """Cached trakt data for a TV show, including its seasons and episodes."""

    __tablename__ = 'trakt_shows'
    # trakt's own show id; supplied by the API rather than auto-generated
    id = Column(Integer, primary_key=True, autoincrement=False)
    title = Column(Unicode)
    year = Column(Integer)
    slug = Column(Unicode)
    tvdb_id = Column(Integer)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tvrage_id = Column(Unicode)
    overview = Column(Unicode)
    first_aired = Column(DateTime)
    air_day = Column(Unicode)
    air_time = Column(Time)
    timezone = Column(Unicode)
    runtime = Column(Integer)
    certification = Column(Unicode)
    network = Column(Unicode)
    country = Column(Unicode)
    status = Column(String)
    rating = Column(Integer)
    votes = Column(Integer)
    language = Column(Unicode)
    homepage = Column(Unicode)
    trailer = Column(Unicode)
    aired_episodes = Column(Integer)
    # translations are fetched lazily via the `translations` property below
    _translations = relation(TraktShowTranslation)
    _translation_languages = Column('translation_languages', Unicode)
    translation_languages = json_synonym('_translation_languages')
    episodes = relation(
        TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic'
    )
    seasons = relation(
        TraktSeason, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic'
    )
    genres = relation(TraktGenre, secondary=show_genres_table)
    # actors are fetched lazily via the `actors` property below
    _actors = relation(TraktActor, secondary=show_actors_table)
    # last modification time reported by trakt
    updated_at = Column(DateTime)
    # when this row was last refreshed locally
    cached_at = Column(DateTime)
    def to_dict(self):
        """Return a plain-dict snapshot of the show for serialization."""
        return {
            "id": self.id,
            "title": self.title,
            "year": self.year,
            "slug": self.slug,
            "tvdb_id": self.tvdb_id,
            "imdb_id": self.imdb_id,
            "tmdb_id": self.tmdb_id,
            "tvrage_id": self.tvrage_id,
            "overview": self.overview,
            "first_aired": self.first_aired,
            "air_day": self.air_day,
            "air_time": self.air_time.strftime("%H:%M") if self.air_time else None,
            "timezone": self.timezone,
            "runtime": self.runtime,
            "certification": self.certification,
            "network": self.network,
            "country": self.country,
            "status": self.status,
            "rating": self.rating,
            "votes": self.votes,
            "language": self.language,
            "homepage": self.homepage,
            "number_of_aired_episodes": self.aired_episodes,
            "genres": [g.name for g in self.genres],
            "updated_at": self.updated_at,
            "cached_at": self.cached_at,
        }
    def __init__(self, trakt_show, session):
        super().__init__()
        self.update(trakt_show, session)
    def update(self, trakt_show, session):
        """Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
        # Refuse to overwrite this row with data belonging to a different show.
        if self.id and self.id != trakt_show['ids']['trakt']:
            raise Exception('Tried to update db show with different show data')
        elif not self.id:
            self.id = trakt_show['ids']['trakt']
        self.slug = trakt_show['ids']['slug']
        self.imdb_id = trakt_show['ids']['imdb']
        self.tmdb_id = trakt_show['ids']['tmdb']
        self.tvrage_id = trakt_show['ids']['tvrage']
        self.tvdb_id = trakt_show['ids']['tvdb']
        if trakt_show.get('airs'):
            airs = trakt_show.get('airs')
            self.air_day = airs.get('day')
            self.timezone = airs.get('timezone')
            # Air time arrives as 'HH:MM'; store only the time-of-day portion.
            if airs.get('time'):
                self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time()
            else:
                self.air_time = None
        if trakt_show.get('first_aired'):
            self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
        else:
            self.first_aired = None
        self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
        for col in [
            'overview',
            'runtime',
            'rating',
            'votes',
            'language',
            'title',
            'year',
            'runtime',
            'certification',
            'network',
            'country',
            'status',
            'aired_episodes',
            'trailer',
            'homepage',
        ]:
            setattr(self, col, trakt_show.get(col))
        # Sometimes genres and translations are None but we really do want a list, hence the "or []"
        self.genres = [
            TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres') or []
        ]
        self.cached_at = datetime.now()
        self.translation_languages = trakt_show.get('available_translations') or []
    def get_episode(self, season, number, session, only_cached=False):
        """Return the episode `season`/`number`, fetching from trakt if needed.

        :param only_cached: When True, never hit the network; raise instead.
        :raises LookupError: If the episode is not cached (only_cached) or the
            trakt request fails / returns nothing.
        """
        # TODO: Does series data being expired mean all episode data should be refreshed?
        episode = (
            self.episodes.filter(TraktEpisode.season == season)
            .filter(TraktEpisode.number == number)
            .first()
        )
        if not episode or self.expired:
            # NOTE(review): '?extended=full' is appended as a path segment, so
            # the final url contains '/?extended=full' — confirm the trakt API
            # accepts this form (cf. other calls that use params={'extended': ...}).
            url = get_api_url(
                'shows', self.id, 'seasons', season, 'episodes', number, '?extended=full'
            )
            if only_cached:
                raise LookupError('Episode %s %s not found in cache' % (season, number))
            logger.debug(
                'Episode {} {} not found in cache, looking up from trakt.', season, number
            )
            try:
                data = get_session().get(url).json()
            except requests.RequestException:
                raise LookupError('Error Retrieving Trakt url: %s' % url)
            if not data:
                raise LookupError('No data in response from trakt %s' % url)
            episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
            if episode:
                episode.update(data, session)
            else:
                episode = TraktEpisode(data, session)
                self.episodes.append(episode)
        session.commit()
        return episode
    def get_season(self, number, session, only_cached=False):
        """Return season `number`, refreshing all seasons from trakt if needed.

        :param only_cached: When True, never hit the network; raise instead.
        :raises LookupError: If the season cannot be found or fetched.
        """
        # TODO: Does series data being expired mean all season data should be refreshed?
        season = self.seasons.filter(TraktSeason.number == number).first()
        if not season or self.expired:
            # NOTE(review): '?extended=full' is appended as a path segment here
            # as well — confirm the trakt API accepts this url form.
            url = get_api_url('shows', self.id, 'seasons', '?extended=full')
            if only_cached:
                raise LookupError('Season %s not found in cache' % number)
            logger.debug('Season {} not found in cache, looking up from trakt.', number)
            try:
                ses = get_session()
                data = ses.get(url).json()
            except requests.RequestException:
                raise LookupError('Error Retrieving Trakt url: %s' % url)
            if not data:
                raise LookupError('No data in response from trakt %s' % url)
            # We fetch all seasons for the given show because we barely get any data otherwise
            for season_result in data:
                db_season = self.seasons.filter(
                    TraktSeason.id == season_result['ids']['trakt']
                ).first()
                if db_season:
                    db_season.update(season_result, session)
                else:
                    db_season = TraktSeason(season_result, session)
                    self.seasons.append(db_season)
                if number == season_result['number']:
                    season = db_season
            if not season:
                raise LookupError('Season %s not found for show %s' % (number, self.title))
        session.commit()
        return season
    @property
    def expired(self):
        """
        :return: True if show details are considered to be expired, ie. need of update
        """
        # TODO stolen from imdb plugin, maybe there's a better way?
        if self.cached_at is None:
            logger.debug('cached_at is None: {}', self)
            return True
        refresh_interval = 2
        # if show has been cancelled or ended, then it is unlikely to be updated often
        if self.year and (self.status == 'ended' or self.status == 'canceled'):
            # Make sure age is not negative
            age = max((datetime.now().year - self.year), 0)
            refresh_interval += age * 5
            logger.debug('show `{}` age {} expires in {} days', self.title, age, refresh_interval)
        return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
    @property
    def translations(self):
        # Lazily fetched from trakt on first access, then kept on the relation.
        if not self._translations:
            self._translations = get_translations(self.id, 'show')
        return self._translations
    @property
    def actors(self):
        # Lazily fetched from trakt on first access; slice assignment keeps the
        # same relation list object.
        if not self._actors:
            self._actors[:] = get_db_actors(self.id, 'show')
        return self._actors
    def __repr__(self):
        return '<name=%s, id=%s>' % (self.title, self.id)
class TraktMovie(Base):
    """Cached trakt data for a movie."""

    __tablename__ = 'trakt_movies'
    # trakt's own movie id; supplied by the API rather than auto-generated
    id = Column(Integer, primary_key=True, autoincrement=False)
    title = Column(Unicode)
    year = Column(Integer)
    slug = Column(Unicode)
    imdb_id = Column(Unicode)
    tmdb_id = Column(Integer)
    tagline = Column(Unicode)
    overview = Column(Unicode)
    released = Column(Date)
    runtime = Column(Integer)
    rating = Column(Integer)
    votes = Column(Integer)
    trailer = Column(Unicode)
    homepage = Column(Unicode)
    language = Column(Unicode)
    # last modification time reported by trakt
    updated_at = Column(DateTime)
    # when this row was last refreshed locally
    cached_at = Column(DateTime)
    # translations and actors are fetched lazily via properties below
    _translations = relation(TraktMovieTranslation, backref='movie')
    _translation_languages = Column('translation_languages', Unicode)
    translation_languages = json_synonym('_translation_languages')
    genres = relation(TraktGenre, secondary=movie_genres_table)
    _actors = relation(TraktActor, secondary=movie_actors_table)
    def __init__(self, trakt_movie, session):
        super().__init__()
        self.update(trakt_movie, session)
    def to_dict(self):
        """Return a plain-dict snapshot of the movie for serialization."""
        return {
            "id": self.id,
            "title": self.title,
            "year": self.year,
            "slug": self.slug,
            "imdb_id": self.imdb_id,
            "tmdb_id": self.tmdb_id,
            "tagline": self.tagline,
            "overview": self.overview,
            "released": self.released,
            "runtime": self.runtime,
            "rating": self.rating,
            "votes": self.votes,
            "language": self.language,
            "homepage": self.homepage,
            "trailer": self.trailer,
            "genres": [g.name for g in self.genres],
            "updated_at": self.updated_at,
            "cached_at": self.cached_at,
        }
    def update(self, trakt_movie, session):
        """Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
        # Refuse to overwrite this row with data belonging to a different movie.
        if self.id and self.id != trakt_movie['ids']['trakt']:
            raise Exception('Tried to update db movie with different movie data')
        elif not self.id:
            self.id = trakt_movie['ids']['trakt']
        self.slug = trakt_movie['ids']['slug']
        self.imdb_id = trakt_movie['ids']['imdb']
        self.tmdb_id = trakt_movie['ids']['tmdb']
        for col in [
            'title',
            'overview',
            'runtime',
            'rating',
            'votes',
            'language',
            'tagline',
            'year',
            'trailer',
            'homepage',
        ]:
            setattr(self, col, trakt_movie.get(col))
        if trakt_movie.get('released'):
            self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True).date()
        self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
        self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])]
        self.cached_at = datetime.now()
        self.translation_languages = trakt_movie.get('available_translations', [])
    @property
    def expired(self):
        """
        :return: True if movie details are considered to be expired, ie. need of update
        """
        # TODO stolen from imdb plugin, maybe there's a better way?
        # NOTE(review): the None-guard checks updated_at but the comparison
        # below uses cached_at — if cached_at is None while updated_at is set
        # this raises TypeError. TraktShow.expired guards on cached_at; confirm
        # which field was intended.
        if self.updated_at is None:
            logger.debug('updated_at is None: {}', self)
            return True
        refresh_interval = 2
        if self.year:
            # Make sure age is not negative
            age = max((datetime.now().year - self.year), 0)
            refresh_interval += age * 5
            logger.debug('movie `{}` age {} expires in {} days', self.title, age, refresh_interval)
        return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
    @property
    def translations(self):
        # Lazily fetched from trakt on first access.
        if not self._translations:
            self._translations = get_translations(self.id, 'movie')
        return self._translations
    @property
    def actors(self):
        # Lazily fetched from trakt on first access; slice assignment keeps the
        # same relation list object.
        if not self._actors:
            self._actors[:] = get_db_actors(self.id, 'movie')
        return self._actors
class TraktShowSearchResult(Base):
    """Caches the mapping from a search string to the show it resolved to."""

    __tablename__ = 'trakt_show_search_results'
    id = Column(Integer, primary_key=True)
    # search strings are stored lower-cased (see __init__) and must be unique
    search = Column(Unicode, unique=True, nullable=False)
    series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
    series = relation(TraktShow, backref='search_strings')
    def __init__(self, search, series_id=None, series=None):
        """Store `search` lower-cased, linked by id and/or loaded object."""
        self.search = search.lower()
        if series_id:
            self.series_id = series_id
        if series:
            self.series = series
class TraktMovieSearchResult(Base):
    """Caches the mapping from a search string to the movie it resolved to."""

    __tablename__ = 'trakt_movie_search_results'
    id = Column(Integer, primary_key=True)
    # search strings are stored lower-cased (see __init__) and must be unique
    search = Column(Unicode, unique=True, nullable=False)
    movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
    movie = relation(TraktMovie, backref='search_strings')
    def __init__(self, search, movie_id=None, movie=None):
        """Store `search` lower-cased, linked by id and/or loaded object."""
        self.search = search.lower()
        if movie_id:
            self.movie_id = movie_id
        if movie:
            self.movie = movie
class TraktMovieIds:
    """Simple class that holds a variety of possible IDs that Trakt utilize in their API, eg. imdb id, trakt id"""
    def __init__(self, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None, **kwargs):
        # Extra keyword arguments are deliberately ignored so callers can pass
        # a full id mapping without filtering it first.
        self.trakt_id = trakt_id
        self.trakt_slug = trakt_slug
        self.tmdb_id = tmdb_id
        self.imdb_id = imdb_id
    def get_trakt_id(self):
        """Return the numeric trakt id when known, otherwise the slug (or None)."""
        if self.trakt_id:
            return self.trakt_id
        return self.trakt_slug
    def to_dict(self):
        """Returns a dict containing id fields that are relevant for a movie"""
        return {
            'id': self.trakt_id,
            'slug': self.trakt_slug,
            'tmdb_id': self.tmdb_id,
            'imdb_id': self.imdb_id,
        }
    def __bool__(self):
        # Truthy when at least one identifier is present.
        return bool(self.trakt_id or self.trakt_slug or self.tmdb_id or self.imdb_id)
class TraktShowIds:
    """Simple class that holds a variety of possible IDs that Trakt utilize in their API, eg. imdb id, trakt id"""
    def __init__(
        self,
        trakt_id=None,
        trakt_slug=None,
        tmdb_id=None,
        imdb_id=None,
        tvdb_id=None,
        tvrage_id=None,
        **kwargs,
    ):
        # Extra keyword arguments are deliberately ignored so callers can pass
        # a full id mapping without filtering it first.
        self.trakt_id = trakt_id
        self.trakt_slug = trakt_slug
        self.tmdb_id = tmdb_id
        self.imdb_id = imdb_id
        self.tvdb_id = tvdb_id
        self.tvrage_id = tvrage_id
    def get_trakt_id(self):
        """Return the numeric trakt id when known, otherwise the slug (or None)."""
        if self.trakt_id:
            return self.trakt_id
        return self.trakt_slug
    def to_dict(self):
        """Returns a dict containing id fields that are relevant for a show/season/episode"""
        return {
            'id': self.trakt_id,
            'slug': self.trakt_slug,
            'tmdb_id': self.tmdb_id,
            'imdb_id': self.imdb_id,
            'tvdb_id': self.tvdb_id,
            'tvrage_id': self.tvrage_id,
        }
    def __bool__(self):
        # Truthy when at least one identifier is present.
        return bool(
            self.trakt_id
            or self.trakt_slug
            or self.tmdb_id
            or self.imdb_id
            or self.tvdb_id
            or self.tvrage_id
        )
def get_item_from_cache(table, session, title=None, year=None, trakt_ids=None):
    """
    Get the cached info for a given show/movie from the database.
    :param table: Either TraktMovie or TraktShow
    :param title: Title of the show/movie
    :param year: First release year
    :param trakt_ids: instance of TraktShowIds or TraktMovieIds
    :param session: database session object
    :return: query result
    """
    result = None
    if trakt_ids:
        # Match on any of the known ids (OR across the non-empty id columns).
        result = (
            session.query(table)
            .filter(
                or_(getattr(table, col) == val for col, val in trakt_ids.to_dict().items() if val)
            )
            .first()
        )
    elif title:
        # Fall back to a title (+ optional year) match; a year embedded in the
        # title string like 'Title (2010)' is split out first.
        title, y = split_title_year(title)
        year = year or y
        query = session.query(table).filter(table.title == title)
        if year:
            query = query.filter(table.year == year)
        result = query.first()
    return result
def get_trakt_id_from_id(trakt_ids, media_type):
    """Search trakt for a trakt id using any of the other known ids.

    :param trakt_ids: TraktShowIds or TraktMovieIds instance with at least one id set.
    :param media_type: 'show' or 'movie'; results of other types are skipped.
    :return: The trakt id of the first matching result, or None if nothing matched.
    :raises LookupError: If no ids were provided or a trakt request fails.
    """
    if not trakt_ids:
        raise LookupError('No lookup arguments provided.')
    requests_session = get_session()
    for id_type, identifier in trakt_ids.to_dict().items():
        if not identifier:
            continue
        # Remove the '_id' suffix for the api call. Bug fix: this used
        # id_type.rstrip('_id'), but rstrip strips *characters* ('_', 'i', 'd'),
        # not a suffix — it would e.g. mangle 'id' into ''.
        stripped_id_type = id_type[:-3] if id_type.endswith('_id') else id_type
        try:
            logger.debug('Searching with params: {}={}', stripped_id_type, identifier)
            results = requests_session.get(
                get_api_url('search'), params={'id_type': stripped_id_type, 'id': identifier}
            ).json()
        except requests.RequestException as e:
            raise LookupError(
                'Searching trakt for %s=%s failed with error: %s'
                % (stripped_id_type, identifier, e)
            )
        for result in results:
            if result['type'] != media_type:
                continue
            return result[media_type]['ids']['trakt']
def get_trakt_id_from_title(title, media_type, year=None):
    """Search trakt by title (and optionally year) for a trakt id.

    Prefers an exact case-insensitive title match; otherwise falls back to
    the first search result. Returns None (implicitly) when nothing matches.

    :raises LookupError: If no title was given or the trakt request fails.
    """
    if not title:
        raise LookupError('No lookup arguments provided.')
    requests_session = get_session()
    # Try finding trakt id based on title and year
    parsed_title, y = split_title_year(title)
    y = year or y
    try:
        params = {'query': parsed_title, 'type': media_type, 'year': y}
        logger.debug('Type of title: {}', type(parsed_title))
        logger.debug(
            'Searching with params: {}', ', '.join('{}={}'.format(k, v) for k, v in params.items())
        )
        results = requests_session.get(get_api_url('search'), params=params).json()
    except requests.RequestException as e:
        raise LookupError('Searching trakt for %s failed with error: %s' % (title, e))
    for result in results:
        # Skip results from the wrong year when an explicit year was supplied.
        if year and result[media_type]['year'] != year:
            continue
        if parsed_title.lower() == result[media_type]['title'].lower():
            return result[media_type]['ids']['trakt']
    # grab the first result if there is no exact match
    if results:
        return results[0][media_type]['ids']['trakt']
def get_trakt_data(media_type, title=None, year=None, trakt_ids=None):
    """Resolve a trakt id from known ids or a title, then fetch the full data.

    :param media_type: 'show' or 'movie'.
    :param title: Optional title (may embed a year, e.g. 'Title (2010)').
    :param year: Optional first release year.
    :param trakt_ids: Optional TraktShowIds/TraktMovieIds instance.
    :return: The decoded JSON object for the media item.
    :raises LookupError: If no id can be resolved or the trakt request fails.
    """
    trakt_id = None
    if trakt_ids:
        trakt_id = trakt_ids.get_trakt_id()
    # Fall back to an id-based search, then to a title search.
    if not trakt_id and trakt_ids:
        trakt_id = get_trakt_id_from_id(trakt_ids, media_type)
    if not trakt_id and title:
        trakt_id = get_trakt_id_from_title(title, media_type, year=year)
    if not trakt_id:
        raise LookupError(
            'No results on Trakt.tv, title=%s, ids=%s.'
            # Bug fix: to_dict was not being called, so the error message
            # showed the bound method instead of the actual id values.
            % (title, trakt_ids.to_dict() if trakt_ids else None)
        )
    # Get actual data from trakt
    try:
        return (
            get_session()
            .get(get_api_url(media_type + 's', trakt_id), params={'extended': 'full'})
            .json()
        )
    except requests.RequestException as e:
        raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e))
def get_user_data(data_type, media_type, session, username):
    """
    Fetches user data from Trakt.tv on the /users/<username>/<data_type>/<media_type> end point. Eg. a user's
    movie collection is fetched from /users/<username>/collection/movies.
    :param data_type: Name of the data type eg. collection, watched etc.
    :param media_type: Type of media we want <data_type> for eg. shows, episodes, movies.
    :param session: A trakt requests session with a valid token
    :param username: Username of the user to fetch data
    :return: List of record dicts with show/season/episode/movie info flattened in.
    """
    # endpoint string is only used for log and error messages
    endpoint = '{}/{}'.format(data_type, media_type)
    try:
        data = session.get(get_api_url('users', username, data_type, media_type)).json()
        if not data:
            logger.warning('No {} data returned from trakt endpoint {}.', data_type, endpoint)
            return []
        logger.verbose(
            'Received {} records from trakt.tv for user {} from endpoint {}',
            len(data),
            username,
            endpoint,
        )
        # extract show, episode and movie information
        for item in data:
            # Flatten the nested media objects into the top-level record.
            episode = item.pop('episode', {})
            season = item.pop('season', {})
            show = item.pop('show', {})
            movie = item.pop('movie', {})
            item.update(episode)
            item.update(season)
            item.update(movie)
            # show is irrelevant if either episode or season is present
            if not episode and not season:
                item.update(show)
        return data
    except requests.RequestException as e:
        raise plugin.PluginError(
            'Error fetching data from trakt.tv endpoint %s for user %s: %s'
            % (endpoint, username, e)
        )
def get_username(username=None, account=None):
    """Returns 'me' if account is provided and username is not"""
    if username:
        return username
    # With an authenticated account, trakt accepts 'me' as the username.
    return 'me' if account else username
|
malkavi/Flexget
|
flexget/components/trakt/db.py
|
Python
|
mit
| 42,299
|
[
"VisIt"
] |
7f8c202e60c7252b9948da98cc0a1bb27bb1ca24e794d42e0b941e7b4d02ebb7
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
import tusers
from google.appengine.ext import ndb
from forms import CustomStatusForm
# Template environment rooted at this file's directory; autoescaping is
# enabled so rendered values are HTML-escaped by default.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class CustomRoomStatusHandler(webapp2.RequestHandler):
    """Lets a tournament owner view and add custom room status labels."""

    def get(self):
        """Render the custom room status form for the tournament in query param 't'."""
        user = tusers.get_current_user()
        #Get the requested tournament
        # NOTE(review): int(tid) raises if 't' is missing/non-numeric — confirm
        # upstream routing guarantees it is present.
        tid = self.request.get('t')
        t_key = ndb.Key('Tournament', int(tid))
        t = t_key.get()
        form = CustomStatusForm()
        # Only tournament owners may view the page; everyone else goes to login.
        if (user and user.key in t.owner):
            template_values = {
                'user' : user,
                't' : t,
                'logout' : tusers.create_logout_url('/'),
                'form' : form,
            }
            template = JINJA_ENVIRONMENT.get_template('view/custom_room_status.html')
            self.response.write(template.render(template_values))
        else:
            self.redirect(tusers.create_login_url(self.request.uri))
    def post(self):
        """Validate the submitted form and append a new custom status to the tournament."""
        user = tusers.get_current_user()
        #Get the requested tournament
        tid = self.request.get('t')
        t_key = ndb.Key('Tournament', int(tid))
        t = t_key.get()
        if (user and user.key in t.owner):
            form = CustomStatusForm(self.request.POST)
            if form.validate():
                # Initialise the list on first use, then persist and redirect
                # back to the same page (post/redirect/get pattern).
                if not t.customRoomStatus:
                    t.customRoomStatus = [form.name.data]
                else:
                    t.customRoomStatus.append(form.name.data)
                t.put()
                self.redirect('/custom_room_status?t=' + tid)
            else:
                # Re-render the page with the invalid form so errors are shown.
                template_values = {
                    'user' : user,
                    't' : t,
                    'logout' : tusers.create_logout_url('/'),
                    'form' : form,
                }
                template = JINJA_ENVIRONMENT.get_template('view/custom_room_status.html')
                self.response.write(template.render(template_values))
        else:
            self.redirect(tusers.create_login_url(self.request.uri))
# WSGI application routing for this module. NOTE(review): debug=True exposes
# stack traces in responses — confirm it is disabled in production config.
app = webapp2.WSGIApplication([
    ('/custom_room_status', CustomRoomStatusHandler)
], debug=True)
|
sarrionandia/tournatrack
|
custom_room_status.py
|
Python
|
apache-2.0
| 2,445
|
[
"Brian"
] |
a545baa9e3e7fd12fde47dc142cce58504b58f9072114576c9a8ddd8dad2e5ff
|
import xarray as xr
from .common import NetCDF4, expects_file_info
__all__ = [
'HOAPS',
]
class HOAPS(NetCDF4):
    """File handler that can read data from HOAPS NetCDF4 files.

    This object handles HOAPS V3.3 NetCDF4 files such as they are
    compatible with :mod:`typhon.collocations`, i.e.:

    * convert the content of the *time* variable to numpy datetime64 objects.

    Examples:
        Draw a world map with all measurements of OceanRAIN:

        .. code-block:: python3

            from typhon.files import FileSet, HOAPS
            hoaps = FileSet(
                name="HOAPS",
                path=".../HOAPS_v3.3/{year}-{month}/hoaps-s.f16.{year}-{month}-{day}.nc",
                handler=HOAPS(),
                # This extracts the field *asst*:
                read_args={
                    "fields": ["asst"],
                },
                # The path of HOAPS files does not provide the end of the
                # file's time coverage. Hence, we set it here explicitly:
                time_coverage="1 day",
            )
            print(hoaps["2013"])
    """
    def __init__(self, **kwargs):
        """Initializes a HOAPS file handler class

        Args:
            **kwargs: Additional key word arguments that are allowed for the
                :class:`typhon.files.handlers.common.NetCDF4` class.
        """
        # Call the base class initializer
        super().__init__(**kwargs)
    @expects_file_info()
    def get_info(self, file, **kwargs):
        """Get information about an HOAPS dataset file

        Args:
            file: A string containing path and name or a :class:`FileInfo`
                object of the file of which to get the information about.
            **kwargs: Additional keyword arguments.

        Returns:
            A :class:`FileInfo` object.
        """
        # Only the time variable is needed to derive the file's time coverage.
        data = super().read(file, fields=("time",))
        file.times = [data["time"].min().item(0), data["time"].max().item(0)]
        return file
    @expects_file_info()
    def read(self, filename, **kwargs):
        """Read and parse a NetCDF file and load it to a xarray.Dataset

        Args:
            filename: Path and name of the file as string or FileInfo object.
            **kwargs: Additional key word arguments that are allowed for the
                :class:`~typhon.files.handlers.common.NetCDF4` class.

        Returns:
            A xarray.Dataset object.
        """
        # Make sure that the standard fields are always gonna be imported:
        fields = kwargs.pop("fields", None)
        if fields is not None:
            fields = {"time", "lat", "lon"} | set(fields)
        # xarray has problems with decoding the time variable correctly. Hence,
        # we disable it here:
        decode_cf = kwargs.pop("decode_cf", True)
        data = super().read(filename, fields=fields, decode_cf=False, **kwargs)
        # Then we fix the problem (we need integer64 instead of integer 32):
        attrs = data["time"].attrs.copy()
        data["time"] = data["time"].astype(int)
        data["time"].attrs = attrs
        # Do decoding now (just if the user wanted it!)
        if decode_cf:
            return xr.decode_cf(data)
        return data
|
atmtools/typhon
|
typhon/files/handlers/hoaps.py
|
Python
|
mit
| 3,243
|
[
"NetCDF"
] |
f5b6bd5d1d5ad8288d7493470e61770843cb4b0e0ba4c61a6289d0567066f6a9
|
"""
Test atomic coordinates and neighbor lists.
"""
import os
import numpy as np
import unittest
from rdkit import Chem
import deepchem as dc
class TestAtomicCoordinates(unittest.TestCase):
  """
  Binding pocket featurization smoke test.

  NOTE(review): despite the class/method names, this exercises
  ConvexHullPocketFinder + BindingPocketFeaturizer rather than
  AtomicCoordinates — confirm whether a rename is intended.
  """
  def test_atomic_coordinates(self):
    """
    Simple test that atomic coordinates returns ndarray of right shape.
    """
    # Fixture protein/ligand structures shipped with the docking tests.
    current_dir = os.path.dirname(os.path.realpath(__file__))
    protein_file = os.path.join(current_dir, "../../dock/tests/1jld_protein.pdb")
    ligand_file = os.path.join(current_dir, "../../dock/tests/1jld_ligand.sdf")
    finder = dc.dock.ConvexHullPocketFinder()
    pocket_featurizer = dc.feat.BindingPocketFeaturizer()
    pockets, pocket_atoms, pocket_coords = finder.find_pockets(protein_file, ligand_file)
    n_pockets = len(pockets)
    # One feature row is expected per detected pocket.
    pocket_features = pocket_featurizer.featurize(
        protein_file, pockets, pocket_atoms, pocket_coords)
    assert isinstance(pocket_features, np.ndarray)
    assert pocket_features.shape[0] == n_pockets
|
Agent007/deepchem
|
deepchem/feat/tests/test_binding_pocket_features.py
|
Python
|
mit
| 1,022
|
[
"RDKit"
] |
69d9af1f8482ce518a03c7ba6d1e6f639acff449f571975e252fabeac09792c5
|
"""Van der Waals energy and radii terms per AMBER atom type"""
vdw_energy = {'IP': 0.00277, 'HS': 0.0157, 'HP': 0.0157, 'Na': 0.00277, 'N*': 0.17, 'Li': 0.0183, 'HO': 0.0,
'Rb': 0.00017, 'HC': 0.0157, 'HA': 0.015, 'O3': 0.21, 'CQ': 0.086, 'C*': 0.086, 'NA': 0.17, 'NB': 0.17,
'NC': 0.17, 'O2': 0.21, 'I': 0.4, 'Br': 0.32, 'H': 0.0157, 'HW': 0.0, 'C0': 0.459789, 'K': 0.000328,
'CK': 0.086, 'Cs': 8.06e-05, 'C': 0.086, 'Cl': 0.1, 'CN': 0.086, 'CM': 0.086, 'F': 0.061, 'CC': 0.086,
'CB': 0.086, 'CA': 0.086, 'Zn': 0.0125, 'O': 0.21, 'N': 0.17, 'P': 0.2, 'S': 0.25, 'CR': 0.086,
'N2': 0.17, 'N3': 0.17, 'CW': 0.086, 'CV': 0.086, 'CT': 0.1094, 'MG': 0.8947, 'OH': 0.2104, 'H2': 0.0157,
'H3': 0.0157, 'H1': 0.0157, 'H4': 0.015, 'H5': 0.015, 'SH': 0.25, 'OW': 0.152, 'OS': 0.17}
vdw_radii = {'IP': 1.868, 'HS': 0.6, 'HP': 1.1, 'Na': 1.868, 'N*': 1.824, 'Li': 1.137, 'HO': 0.0001, 'Rb': 2.956,
'HC': 1.487, 'HA': 1.459, 'O3': 1.6612, 'CQ': 1.908, 'C*': 1.908, 'NA': 1.824, 'NB': 1.824, 'NC': 1.824,
'O2': 1.6612, 'I': 2.35, 'Br': 2.22, 'H': 0.6, 'HW': 0.0001, 'C0': 1.7131, 'K': 2.658, 'CK': 1.908,
'Cs': 3.395, 'C': 1.908, 'Cl': 2.47, 'CN': 1.908, 'CM': 1.908, 'F': 1.75, 'CC': 1.908, 'CB': 1.908,
'CA': 1.908, 'Zn': 1.1, 'O': 1.6612, 'N': 1.824, 'P': 2.1, 'S': 2.0, 'CR': 1.908, 'N2': 1.824, 'N3': 1.875,
'CW': 1.908, 'CV': 1.908, 'CT': 1.908, 'MG': 0.7926, 'OH': 1.721, 'H2': 1.287, 'H3': 1.187, 'H1': 1.387,
'H4': 1.409, 'H5': 1.359, 'SH': 2.0, 'OW': 1.7683, 'OS': 1.6837}
|
brianjimenez/lightdock
|
lightdock/scoring/vdw/data/vdw.py
|
Python
|
gpl-3.0
| 1,634
|
[
"Amber"
] |
44e9f6020e9105596d1451e0b47d28c56054519b4171e6373d61db927d9647b9
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
# True when running on Windows (used below to decide whether the doctest
# failure reporter needs the encoding workaround).
IS_WINDOWS = (os.name == 'nt')

class Skipped(Exception):
    """Raised internally to signal that a test was skipped."""
    pass

import __future__
# Compiler flags applied when executing doctest examples, so that they run
# as if the tested module had ``from __future__ import division``.
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in ``s``, and return the result.
    If the string ``s`` is Unicode, it is encoded using the stdout
    encoding and the ``backslashreplace`` error handler.
    """
    # On Python 2, unicode input must be byte-encoded first (2to3 makes
    # this branch bogus, hence the version guard).
    if not PY3 and isinstance(s, unicode):
        s = s.encode(pdoctest._encoding, 'backslashreplace')
    # Prefix each non-empty line with the requested number of spaces.
    return re.sub('(?m)^(?!$)', ' ' * indent, s)
pdoctest._indent = _indent
# Override the doctest reporter so output survives Windows + Python 3
# console encodings.
def _report_failure(self, out, test, example, got):
    """
    Report that the given example failed.
    """
    s = self._checker.output_difference(example, got, self.optionflags)
    # Round-trip through raw_unicode_escape/utf8, dropping characters the
    # Windows console cannot represent.
    s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
    out(self._failure_header(test, example) + s)
# Only monkeypatch the runner where the workaround is actually needed.
if PY3 and IS_WINDOWS:
    DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
    """
    Converts a list of '/' separated paths into a list of
    native (os.sep separated) paths and converts to lowercase
    if the system is case insensitive.
    """
    converted = []
    for path in lst:
        native = os.path.join(*path.split("/"))
        # on windows the slash after the colon is dropped
        if sys.platform == "win32":
            colon = native.find(':')
            if colon != -1 and native[colon + 1] != '\\':
                native = native[:colon + 1] + '\\' + native[colon + 1:]
        converted.append(sys_normcase(native))
    return converted
def get_sympy_dir():
    """
    Returns the root sympy directory and set the global value
    indicating whether the system is case sensitive or not.
    """
    global sys_case_insensitive
    this_file = os.path.abspath(__file__)
    # Root is two levels above sympy/utilities/runtests.py.
    sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
    sympy_dir = os.path.normpath(sympy_dir)
    # Heuristic: if the directory is also reachable under its all-lower and
    # all-upper spelling, the filesystem is case insensitive (e.g. Windows,
    # default macOS).
    sys_case_insensitive = (os.path.isdir(sympy_dir) and
                            os.path.isdir(sympy_dir.lower()) and
                            os.path.isdir(sympy_dir.upper()))
    return sys_normcase(sympy_dir)
def sys_normcase(f):
    """Lower-case path *f* on case-insensitive filesystems; otherwise return
    it unchanged."""
    if sys_case_insensitive:  # global defined after call to get_sympy_dir()
        return f.lower()
    return f
def setup_pprint():
    """Configure sympy printing for doctest runs (ASCII, plain str output)."""
    from sympy import pprint_use_unicode, init_printing
    # force pprint to be in ascii mode in doctests
    pprint_use_unicode(False)
    # hook our nice, hash-stable strprinter
    init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
    function_kwargs={}, command=sys.executable,
    module='sympy.utilities.runtests', force=False):
    """
    Run a function in a Python subprocess with hash randomization enabled.

    If hash randomization is not supported by the version of Python given, it
    returns False.  Otherwise, it returns the exit value of the command.  The
    function is passed to sys.exit(), so the return value of the function will
    be the return value.

    The environment variable PYTHONHASHSEED is used to seed Python's hash
    randomization.  If it is set, this function will return False, because
    starting a new subprocess is unnecessary in that case.  If it is not set,
    one is set at random, and the tests are run.  Note that if this
    environment variable is set when Python starts, hash randomization is
    automatically enabled.  To force a subprocess to be created even if
    PYTHONHASHSEED is set, pass ``force=True``.  This flag will not force a
    subprocess in Python versions that do not support hash randomization (see
    below), because those versions of Python do not support the ``-R`` flag.

    ``function`` should be a string name of a function that is importable from
    the module ``module``, like "_test".  The default for ``module`` is
    "sympy.utilities.runtests".  ``function_args`` and ``function_kwargs``
    should be a repr-able tuple and dict, respectively.  The default Python
    command is sys.executable, which is the currently running Python command.

    This function is necessary because the seed for hash randomization must be
    set by the environment variable before Python starts.  Hence, in order to
    use a predetermined seed for tests, we must start Python in a separate
    subprocess.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    Examples
    ========

    >>> from sympy.utilities.runtests import (
    ... run_in_subprocess_with_hash_randomization)
    >>> # run the core tests in verbose mode
    >>> run_in_subprocess_with_hash_randomization("_test",
    ... function_args=("core",),
    ... function_kwargs={'verbose': True}) # doctest: +SKIP
    # Will return 0 if sys.executable supports hash randomization and tests
    # pass, 1 if they fail, and False if it does not support hash
    # randomization.
    """
    # Note, we must return False everywhere, not None, as subprocess.call will
    # sometimes return None.
    # First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
    p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode != 0:
        return False
    hash_seed = os.getenv("PYTHONHASHSEED")
    if not hash_seed:
        # Pick a random seed; the subprocess inherits it via the environment.
        os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
    else:
        if not force:
            return False
    # Now run the command
    commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
                     (module, function, function, repr(function_args),
                     repr(function_kwargs)))
    try:
        p = subprocess.Popen([command, "-R", "-c", commandstring])
        p.communicate()
    except KeyboardInterrupt:
        # Let the child finish its shutdown before restoring the environment.
        p.wait()
    finally:
        # Put the environment variable back, so that it reads correctly for
        # the current Python process.
        if hash_seed is None:
            del os.environ["PYTHONHASHSEED"]
        else:
            os.environ["PYTHONHASHSEED"] = hash_seed
        return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
    doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
    """
    Run all tests.

    Right now, this runs the regular tests (bin/test), the doctests
    (bin/doctest), the examples (examples/all.py), and the sage tests (see
    sympy/external/tests/test_sage.py).

    This is what ``setup.py test`` uses.

    You can pass arguments and keyword arguments to the test functions that
    support them (for now, test, doctest, and the examples). See the
    docstrings of those functions for a description of the available options.

    For example, to run the solvers tests with colors turned off:

    >>> from sympy.utilities.runtests import run_all_tests
    >>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP
    """
    tests_successful = True
    try:
        # Regular tests
        if not test(*test_args, **test_kwargs):
            # some regular test fails, so set the tests_successful
            # flag to false and continue running the doctests
            tests_successful = False
        # Doctests
        print()
        if not doctest(*doctest_args, **doctest_kwargs):
            tests_successful = False
        # Examples
        print()
        sys.path.append("examples")
        from all import run_examples  # examples/all.py
        if not run_examples(*examples_args, **examples_kwargs):
            tests_successful = False
        # Sage tests
        if not (sys.platform == "win32" or PY3):
            # run Sage tests; Sage currently doesn't support Windows or Python 3
            dev_null = open(os.devnull, 'w')
            if subprocess.call("sage -v", shell=True, stdout=dev_null,
                stderr=dev_null) == 0:
                if subprocess.call("sage -python bin/test "
                    "sympy/external/tests/test_sage.py", shell=True) != 0:
                    tests_successful = False
        if tests_successful:
            return
        else:
            # Return nonzero exit code
            sys.exit(1)
    except KeyboardInterrupt:
        print()
        print("DO *NOT* COMMIT!")
        sys.exit(1)
def test(*paths, **kwargs):
    """
    Run tests in the specified test_*.py files.

    Tests in a particular test_*.py file are run if any of the given strings
    in ``paths`` matches a part of the test file's path. If ``paths=[]``,
    tests in all test_*.py files are run.

    Notes:

    - If sort=False, tests are run in random order (not default).
    - Paths can be entered in native system format or in unix,
      forward-slash format.
    - Files that are on the blacklist can be tested by providing
      their path; they are only excluded if no paths are given.

    **Explanation of test results**

    ======  ===============================================================
    Output  Meaning
    ======  ===============================================================
    .       passed
    F       failed
    X       XPassed (expected to fail but passed)
    f       XFAILed (expected to fail and indeed failed)
    s       skipped
    w       slow
    T       timeout (e.g., when ``--timeout`` is used)
    K       KeyboardInterrupt (when running the slow tests with ``--slow``,
            you can interrupt one of them without killing the test runner)
    ======  ===============================================================

    Colors have no additional meaning and are used just to facilitate
    interpreting the output.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.test()    # doctest: +SKIP

    Run one file:

    >>> sympy.test("sympy/core/tests/test_basic.py")    # doctest: +SKIP
    >>> sympy.test("_basic")    # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...        "sympy/functions")    # doctest: +SKIP

    Run all tests in sympy/core and sympy/utilities:

    >>> sympy.test("/core", "/util")    # doctest: +SKIP

    Run specific test from a file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...        kw="test_equality")    # doctest: +SKIP

    Run specific test from any file:

    >>> sympy.test(kw="subs")    # doctest: +SKIP

    Run the tests with verbose mode on:

    >>> sympy.test(verbose=True)    # doctest: +SKIP

    Don't sort the test output:

    >>> sympy.test(sort=False)    # doctest: +SKIP

    Turn on post-mortem pdb:

    >>> sympy.test(pdb=True)    # doctest: +SKIP

    Turn off colors:

    >>> sympy.test(colors=False)    # doctest: +SKIP

    Force colors, even when the output is not to a terminal (this is useful,
    e.g., if you are piping to ``less -r`` and you still want colors)

    >>> sympy.test(force_colors=True)    # doctest: +SKIP

    The traceback verboseness can be set to "short" or "no" (default is
    "short")

    >>> sympy.test(tb='no')    # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts. The
    split currently only splits the test files, though this may change in the
    future. ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``. For instance, to run the first half of the test suite:

    >>> sympy.test(split='1/2')  # doctest: +SKIP

    You can disable running the tests in a separate subprocess using
    ``subprocess=False``.  This is done to support seeding hash randomization,
    which is enabled by default in the Python versions where it is supported.
    If subprocess=False, hash randomization is enabled/disabled according to
    whether it has been enabled or not in the calling Python process.
    However, even if it is enabled, the seed cannot be printed unless it is
    called from a new Python process.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    If hash randomization is not supported ``subprocess=False`` is used
    automatically.

    >>> sympy.test(subprocess=False)     # doctest: +SKIP

    To set the hash randomization seed, set the environment variable
    ``PYTHONHASHSEED`` before running the tests.  This can be done from within
    Python using

    >>> import os
    >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP

    Or from the command line using

    $ PYTHONHASHSEED=42 ./bin/test

    If the seed is not set, a random seed will be chosen.

    Note that to reproduce the same hash values, you must use both the same seed
    as well as the same architecture (32-bit vs. 64-bit).
    """
    subprocess = kwargs.pop("subprocess", True)
    rerun = kwargs.pop("rerun", 0)
    # count up from 0, do not print 0
    print_counter = lambda i : (print("rerun %d" % (rerun-i))
                                if rerun-i else None)
    if subprocess:
        # loop backwards so last i is 0
        for i in range(rerun, -1, -1):
            print_counter(i)
            ret = run_in_subprocess_with_hash_randomization("_test",
                function_args=paths, function_kwargs=kwargs)
            # ret is False when hash randomization is unsupported; fall
            # through to the in-process loop below.
            if ret is False:
                break
            val = not bool(ret)
            # exit on the first failure or if done
            if not val or i == 0:
                return val
    # rerun even if hash randomization is not supported
    for i in range(rerun, -1, -1):
        print_counter(i)
        val = not bool(_test(*paths, **kwargs))
        if not val or i == 0:
            return val
def _test(*paths, **kwargs):
    """
    Internal function that actually runs the tests.

    All keyword arguments from ``test()`` are passed to this function except
    for ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstring of
    ``test()`` for more information.
    """
    verbose = kwargs.get("verbose", False)
    tb = kwargs.get("tb", "short")
    kw = kwargs.get("kw", None) or ()
    # ensure that kw is a tuple
    if isinstance(kw, str):
        kw = (kw, )
    post_mortem = kwargs.get("pdb", False)
    colors = kwargs.get("colors", True)
    force_colors = kwargs.get("force_colors", False)
    sort = kwargs.get("sort", True)
    seed = kwargs.get("seed", None)
    if seed is None:
        seed = random.randrange(100000000)
    timeout = kwargs.get("timeout", False)
    slow = kwargs.get("slow", False)
    enhance_asserts = kwargs.get("enhance_asserts", False)
    split = kwargs.get('split', None)
    blacklist = kwargs.get('blacklist', [])
    blacklist = convert_to_native_paths(blacklist)
    r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
        force_colors=force_colors, split=split)
    t = SymPyTests(r, kw, post_mortem, seed)
    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False
    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)
    test_files = t.get_test_files('sympy')
    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]
    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # Keep files that match any requested path fragment or whose
        # basename fnmatch-es a requested pattern.
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break
    if split:
        matched = split_list(matched, split)
    t._testfiles.extend(matched)
    return int(not t.test(sort=sort, timeout=timeout,
        slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
    r"""
    Runs doctests in all \*.py files in the sympy directory which match
    any of the given strings in ``paths`` or all tests if paths=[].

    Notes:

    - Paths can be entered in native system format or in unix,
      forward-slash format.
    - Files that are on the blacklist can be tested by providing
      their path; they are only excluded if no paths are given.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.doctest() # doctest: +SKIP

    Run one file:

    >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
    >>> sympy.doctest("polynomial.rst") # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP

    Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
    sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:

    >>> sympy.doctest("polynomial") # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts. The
    split currently only splits the test files, though this may change in the
    future. ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``. Note that the regular doctests and the Sphinx
    doctests are split independently. For instance, to run the first half of
    the test suite:

    >>> sympy.doctest(split='1/2')  # doctest: +SKIP

    The ``subprocess`` and ``verbose`` options are the same as with the function
    ``test()``.  See the docstring of that function for more information.
    """
    subprocess = kwargs.pop("subprocess", True)
    rerun = kwargs.pop("rerun", 0)
    # count up from 0, do not print 0
    print_counter = lambda i : (print("rerun %d" % (rerun-i))
                                if rerun-i else None)
    if subprocess:
        # loop backwards so last i is 0
        for i in range(rerun, -1, -1):
            print_counter(i)
            ret = run_in_subprocess_with_hash_randomization("_doctest",
                function_args=paths, function_kwargs=kwargs)
            # ret is False when hash randomization is unsupported; fall
            # through to the in-process loop below.
            if ret is False:
                break
            val = not bool(ret)
            # exit on the first failure or if done
            if not val or i == 0:
                return val
    # rerun even if hash randomization is not supported
    for i in range(rerun, -1, -1):
        print_counter(i)
        val = not bool(_doctest(*paths, **kwargs))
        if not val or i == 0:
            return val
def _doctest(*paths, **kwargs):
    """
    Internal function that actually runs the doctests.

    All keyword arguments from ``doctest()`` are passed to this function
    except for ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstrings of
    ``doctest()`` and ``test()`` for more information.
    """
    normal = kwargs.get("normal", False)
    verbose = kwargs.get("verbose", False)
    blacklist = kwargs.get("blacklist", [])
    split = kwargs.get('split', None)
    # NOTE(review): extends the caller-supplied blacklist list in place.
    blacklist.extend([
        "doc/src/modules/plotting.rst",  # generates live plots
        "sympy/utilities/compilef.py",  # needs tcc
        "sympy/physics/gaussopt.py", # raises deprecation warning
    ])
    # Skip everything whose doctests need an optional dependency that is
    # not installed.
    if import_module('numpy') is None:
        blacklist.extend([
            "sympy/plotting/experimental_lambdify.py",
            "sympy/plotting/plot_implicit.py",
            "examples/advanced/autowrap_integrators.py",
            "examples/advanced/autowrap_ufuncify.py",
            "examples/intermediate/sample.py",
            "examples/intermediate/mplot2d.py",
            "examples/intermediate/mplot3d.py",
            "doc/src/modules/numeric-computation.rst"
        ])
    else:
        if import_module('matplotlib') is None:
            blacklist.extend([
                "examples/intermediate/mplot2d.py",
                "examples/intermediate/mplot3d.py"
            ])
        else:
            # don't display matplotlib windows
            from sympy.plotting.plot import unset_show
            unset_show()
    if import_module('pyglet') is None:
        blacklist.extend(["sympy/plotting/pygletplot"])
    if import_module('theano') is None:
        blacklist.extend(["doc/src/modules/numeric-computation.rst"])
    # disabled because of doctest failures in asmeurer's bot
    blacklist.extend([
        "sympy/utilities/autowrap.py",
        "examples/advanced/autowrap_integrators.py",
        "examples/advanced/autowrap_ufuncify.py"
        ])
    # blacklist these modules until issue 4840 is resolved
    blacklist.extend([
        "sympy/conftest.py",
        "sympy/utilities/benchmarking.py"
        ])
    blacklist = convert_to_native_paths(blacklist)
    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False
    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)
    r = PyTestReporter(verbose, split=split)
    t = SymPyDocTests(r, normal)
    test_files = t.get_test_files('sympy')
    test_files.extend(t.get_test_files('examples', init_only=False))
    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]
    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # take only what was requested...but not blacklisted items
        # and allow for partial match anywhere or fnmatch of name
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break
    if split:
        matched = split_list(matched, split)
    t._testfiles.extend(matched)
    # run the tests and record the result for this *py portion of the tests
    if t._testfiles:
        failed = not t.test()
    else:
        failed = False
    # N.B.
    # --------------------------------------------------------------------
    # Here we test *.rst files at or below doc/src. Code from these must
    # be self supporting in terms of imports since there is no importing
    # of necessary modules by doctest.testfile. If you try to pass *.py
    # files through this they might fail because they will lack the needed
    # imports and smarter parsing that can be done with source code.
    #
    test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
    test_files.sort()
    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]
    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # Take only what was requested as long as it's not on the blacklist.
        # Paths were already made native in *py tests so don't repeat here.
        # There's no chance of having a *py file slip through since we
        # only have *rst files in test_files.
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break
    if split:
        matched = split_list(matched, split)
    setup_pprint()
    first_report = True
    for rst_file in matched:
        if not os.path.isfile(rst_file):
            continue
        old_displayhook = sys.displayhook
        try:
            out = sympytestfile(
                rst_file, module_relative=False, encoding='utf-8',
                optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
                pdoctest.IGNORE_EXCEPTION_DETAIL)
        finally:
            # make sure we return to the original displayhook in case some
            # doctest has changed that
            sys.displayhook = old_displayhook
        rstfailed, tested = out
        if tested:
            failed = rstfailed or failed
            if first_report:
                first_report = False
                msg = 'rst doctests start'
                if not t._testfiles:
                    r.start(msg=msg)
                else:
                    r.write_center(msg)
                    print()
            # use as the id, everything past the first 'sympy'
            file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
            print(file_id, end=" ")
            # get at least the name out so it is know who is being tested
            wid = r.terminal_width - len(file_id) - 1  # update width
            test_file = '[%s]' % (tested)
            report = '[%s]' % (rstfailed or 'OK')
            print(''.join(
                [test_file, ' '*(wid - len(test_file) - len(report)), report])
            )
    # the doctests for *py will have printed this message already if there was
    # a failure, so now only print it if there was intervening reporting by
    # testing the *rst as evidenced by first_report no longer being True.
    if not first_report and failed:
        print()
        print("DO *NOT* COMMIT!")
    return int(failed)
# Pattern for the 'a/b' split specification: part number / total parts.
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')

def split_list(l, split):
    """
    Splits a list into part a of b

    split should be a string of the form 'a/b'. For instance, '1/3' would give
    the split one of three.

    If the length of the list is not divisible by the number of splits, the
    last split will have more items.

    >>> from sympy.utilities.runtests import split_list
    >>> a = list(range(10))
    >>> split_list(a, '1/3')
    [0, 1, 2]
    >>> split_list(a, '2/3')
    [3, 4, 5]
    >>> split_list(a, '3/3')
    [6, 7, 8, 9]
    """
    match = sp.match(split)
    if match is None:
        raise ValueError("split must be a string of the form a/b where a and b are ints")
    part, total = (int(g) for g in match.groups())
    size = len(l)
    # Integer division makes the final part absorb any remainder.
    return l[(part - 1) * size // total:part * size // total]
from collections import namedtuple
# Mirrors doctest.TestResults: (number of failures, number of attempts).
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False,
             parser=pdoctest.DocTestParser(), encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg ``module_relative`` specifies how filenames
    should be interpreted:

    - If ``module_relative`` is True (the default), then ``filename``
      specifies a module-relative path.  By default, this path is
      relative to the calling module's directory; but if the
      ``package`` argument is specified, then it is relative to that
      package.  To ensure os-independence, ``filename`` should use
      "/" characters to separate path segments, and should not
      be an absolute path (i.e., it may not begin with "/").

    - If ``module_relative`` is False, then ``filename`` specifies an
      os-specific path.  The path may be absolute or relative (to
      the current working directory).

    Optional keyword arg ``name`` gives the name of the test; by default
    use the file's basename.

    Optional keyword argument ``package`` is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify ``package`` if ``module_relative`` is False.

    Optional keyword arg ``globs`` gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg ``extraglobs`` gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg ``verbose`` prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg ``report`` prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg ``optionflags`` or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

    - DONT_ACCEPT_TRUE_FOR_1
    - DONT_ACCEPT_BLANKLINE
    - NORMALIZE_WHITESPACE
    - ELLIPSIS
    - SKIP
    - IGNORE_EXCEPTION_DETAIL
    - REPORT_UDIFF
    - REPORT_CDIFF
    - REPORT_NDIFF
    - REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg ``raise_on_error`` raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg ``parser`` specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg ``encoding`` specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    if not PY3:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative)
        if encoding is not None:
            text = text.decode(encoding)
    else:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative, encoding)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'
    if raise_on_error:
        runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
        # Use our whitespace/float-tolerant output checker.
        runner._checker = SymPyOutputChecker()
    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test, compileflags=future_flags)
    if report:
        runner.summarize()
    # Keep doctest.master in sync so summarize() aggregates across calls.
    if pdoctest.master is None:
        pdoctest.master = runner
    else:
        pdoctest.master.merge(runner)
    return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
    def __init__(self, reporter, kw="", post_mortem=False,
                 seed=None):
        """Collect test files and run them through *reporter*.

        ``kw`` is a tuple of keyword substrings used to select test
        functions; ``post_mortem`` enables pdb on failure; ``seed`` fixes
        the shuffle/random seed (a random one is drawn when None).
        """
        self._post_mortem = post_mortem
        self._kw = kw
        self._count = 0
        # sympy_dir is a module-level global set elsewhere in this file.
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._testfiles = []
        self._seed = seed if seed is not None else random.random()
    def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
        """
        Runs the tests returning True if all tests pass, otherwise False.

        If sort=False run tests in random order.
        """
        if sort:
            self._testfiles.sort()
        else:
            from random import shuffle
            # Seed first so the shuffled order is reproducible from _seed.
            random.seed(self._seed)
            shuffle(self._testfiles)
        self._reporter.start(self._seed)
        for f in self._testfiles:
            try:
                self.test_file(f, sort, timeout, slow, enhance_asserts)
            except KeyboardInterrupt:
                # Report what ran so far, then propagate the interrupt.
                print(" interrupted by user")
                self._reporter.finish()
                raise
        return self._reporter.finish()
    def _enhance_asserts(self, source):
        """Rewrite ``assert a <op> b`` statements in *source* (an AST or
        parseable string) so that failing asserts also display the compared
        values, and return the fixed-up AST."""
        from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
            Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)

        # Printable spelling for each comparison operator node.
        ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
               "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
               "In": 'in', "NotIn": 'not in'}

        class Transform(NodeTransformer):
            def visit_Assert(self, stmt):
                if isinstance(stmt.test, Compare):
                    compare = stmt.test
                    values = [compare.left] + compare.comparators
                    # Bind every compared value to a temp name so it is
                    # evaluated once and can be shown in the message.
                    names = [ "_%s" % i for i, _ in enumerate(values) ]
                    names_store = [ Name(n, Store()) for n in names ]
                    names_load = [ Name(n, Load()) for n in names ]
                    target = Tuple(names_store, Store())
                    value = Tuple(values, Load())
                    assign = Assign([target], value)
                    new_compare = Compare(names_load[0], compare.ops, names_load[1:])
                    # Message interleaves the values with the operators.
                    msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
                    msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
                    test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
                    # Replace one assert with (assignment, enhanced assert).
                    return [assign, test]
                else:
                    return stmt

        tree = parse(source)
        new_tree = Transform().visit(tree)
        return fix_missing_locations(new_tree)
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# is defined the XFAIL decorator
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
self._reporter.entering_filename(filename, len(funcs))
raise
self._reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
self._reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
self._reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip(v)
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
# then must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
Returns ``False`` if some dependencies are not met and the test should be
skipped otherwise returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return False
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
case that it looks like the the code comes from a different module.
In the case of decorated functions (e.g. @vectorize) they appear
to come from a different module (e.g. multidemensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
# NOTE: this is not the exact linenumber but its better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib our OutputChecker class
supports numerical comparison of floats occuring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
|
Sumith1896/sympy
|
sympy/utilities/runtests.py
|
Python
|
bsd-3-clause
| 78,928
|
[
"VisIt"
] |
5dd1bccf0b8851d37cee16b250ab54da69b0b649ec50a2e89ac6b00e74d64320
|
"""cdbgui.py
Developers: Christina Hammer, Noelle Todd
Last Updated: August 19, 2014
This file contains a class version of the interface, in an effort to
make a program with no global variables.
"""
from datetime import datetime, timedelta, date
from tkinter import *
from tkinter import ttk
from cdbifunc2 import *
from volunteerlog import *
class allobjects:
"""This class attempts to contain ALL labels, entries, etc.,
so that there are no global variables.
"""
    def __init__(self):
        """Build the entire Food Pantry Database window.

        Every widget, Tk variable, and lookup table is created here and
        stored on self so no function needs global state; the Tk main
        loop is started at the end of this constructor.
        """
        #Variables used later on
        self.cursel = 0
        self.selectedVisit = 0
        self.id_list = []
        self.mem_list = []
        self.clientlist = list_people()
        self.visitDict = {}
        #holds entryboxes for family members
        self.memDict = {}
        self.info = {}
        self.addmemberON = False #checks if member boxes have already been added
        #dictionaries/lists used for date entry
        self.month_li = ["January", "February", "March", "April",
                         "May", "June", "July", "August", "September",
                         "October", "November", "December"]
        self.month_day_dict = {"January":31, "February":29, "March":31,
                               "April":30, "May":31, "June":30, "July":31,
                               "August":31, "September":30, "October":31,
                               "November":30, "December":31}
        self.month_int = {1:"January", 2:"February", 3:"March",
                          4:"April", 5:"May", 6:"June", 7:"July",
                          8:"August", 9:"September", 10:"October",
                          11:"November", 12:"December"}
        self.int_month = {"January":1, "February":2, "March":3,
                          "April":4, "May":5, "June":6, "July":7,
                          "August":8, "September":9, "October":10,
                          "November":11, "December":12}
        #customize colors/fonts
        #This will connect to the database itself,
        #and retrieve the colors from there.
        self.bgcolor = 'light blue' #'lavender'
        #self.labfont = 'Helvetica'
        #self.labBGcolor = 'gray10'
        #self.labFGcolor = 'white'
        #self.cliSearLabBG = 'Coral'
        #self.cliSearLabFG = 'white'
        #configuring window
        self.ciGui=Tk()
        # NOTE(review): Frame.grid() returns None, so self.gridframe is None
        # and every widget created with it as master falls back to the
        # default root window (self.ciGui).  Works, but probably unintended.
        self.gridframe=Frame(self.ciGui).grid()
        self.ciGui.configure(background=self.bgcolor)
        self.ciGui.title('Food Pantry Database')
        #self.volscreen = VolunteerDisplay(self.gridframe, self.bgcolor)
        #CLIENT SEARCH SETUP
        # NOTE(review): attributes assigned as `x = Label(...).grid(...)`
        # hold None (grid() has no return value); keep a separate reference
        # if such a widget ever needs to be reconfigured later.
        self.cslabel = Label(self.gridframe,text='Client Search',
                             font=("Helvetica", 16),fg='white',bg='Coral')\
                             .grid(row=0,column=0,columnspan=2, sticky=W)
        self.csblank = Label(self.gridframe, text=' ',
                             font=('Helvetica',10), bg=self.bgcolor)\
                             .grid(row=0,column=2,sticky=E)
        #name searchbox
        self.ns = StringVar()
        self.nameSearchEnt = Entry(self.gridframe, cursor = 'shuttle',
                                   textvariable=self.ns)
        self.nameSearchEnt.grid(row=2,column=0)
        self.nameSearchEnt.bind('<Key>',self.nameSearch)
        self.searchButton = Button(self.gridframe, text='Search Clients',
                                   command=self.nameSearch)
        self.searchButton.grid(row=2, column=1)
        #client listbox (clframe)
        self.client_listbox = Listbox(self.gridframe,height=10,width=40)
        self.client_listbox.bind('<<ListboxSelect>>', self.displayInfo )
        self.client_listbox.config(exportselection=0)
        self.scrollb = Scrollbar(self.gridframe)
        # NOTE(review): this second bind repeats the one above -- harmless,
        # but redundant.
        self.client_listbox.bind('<<ListboxSelect>>',self.displayInfo )
        self.client_listbox.config(yscrollcommand=self.scrollb.set)
        self.scrollb.config(command=self.client_listbox.yview)
        self.client_listbox.grid(row=3, column=0, rowspan=5, columnspan=2)
        self.scrollb.grid(row=3, column=1, rowspan=5, sticky=E+N+S)
        self.firstSep = ttk.Separator(self.gridframe, orient='vertical')\
                        .grid(row=1,column=2,rowspan=40,sticky=NS)
        self.NCButton = Button(self.gridframe, text='New Client',
                               command=self.newClientDisplay, width=25)\
                               .grid(row=9, column=0, columnspan=2)
        #CLIENT INFORMATION SETUP
        self.secondSep = ttk.Separator(self.gridframe, orient='horizontal')\
                         .grid(row=0,column=3,columnspan=40,sticky=EW)
        self.cilabel = Label(self.gridframe, text='Client Information',
                             font=("Helvetica", 16),fg='white',bg='gray10')\
                             .grid(row=0,column=3,columnspan=12, sticky=W)
        self.ciblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
                             bg=self.bgcolor).grid(row=1,column=3,sticky=E)
        #First name
        self.fnv = StringVar()
        self.fnlabel = Label(self.gridframe, text="First Name: ",
                             font=('Helvetica',12),bg=self.bgcolor)\
                             .grid(row=2, column=3,rowspan=2,sticky=E)
        self.fname = Entry(self.gridframe, textvariable=self.fnv,bd=4)
        self.fname.grid(row=2, column=4, rowspan=2, columnspan=1, sticky=W)
        #Last name
        self.lnv = StringVar()
        self.lnlabel = Label(self.gridframe, text='Last Name: ',
                             font=('Helvetica',12),bg=self.bgcolor)\
                             .grid(row=2,column=5,rowspan=2, sticky=W)
        self.lname = Entry(self.gridframe, textvariable=self.lnv,bd=4)
        self.lname.grid(row=2,column=6, rowspan=2, columnspan=1, sticky=W)
        #Phone
        self.phv = StringVar()
        self.phlabel = Label(self.gridframe, text='Phone: ',
                             font=('Helvetica',12),bg=self.bgcolor)\
                             .grid(row=2, column=7,rowspan=2, sticky=E)
        self.phone = Entry(self.gridframe, textvariable=self.phv, bd=4)
        self.phone.grid(row=2, column=8, columnspan=2, rowspan=2, sticky=W)
        #Date of Birth
        self.doblabel = Label(self.gridframe, text='Date of Birth: ',
                              font=('Helvetica',12),bg=self.bgcolor)\
                              .grid(row=4,column=3, rowspan=2, sticky=E)
        self.mv = StringVar()
        self.dv = StringVar()
        self.yv = StringVar()
        #dob month combobox
        self.mob = ttk.Combobox(self.gridframe, width=10, state='readonly',
                                values=self.month_li, textvariable=self.mv)
        self.mob.bind('<<ComboboxSelected>>', self.monthbox_select)
        #dob day spinbox
        self.dob = Spinbox(self.gridframe, from_=0, to=0,
                           textvariable=self.dv, width=5, bd=4)
        #dob year spinbox
        self.yob = Spinbox(self.gridframe, from_=1900, to=2500,
                           textvariable=self.yv, width=7, bd=4)
        self.mob.grid(row=4, column=4, rowspan=2, sticky=W)
        self.dob.grid(row=4, column=4, rowspan=2, sticky=E)
        self.yob.grid(row=4, column=5, rowspan=2)
        #Age
        self.agev = StringVar()
        #self.alabel = Label(self.gridframe, text='Age: ',font=('Helvetica',12),
        #                    bg=self.bgcolor).grid(row=4,column=6, rowspan=2, sticky=W)
        self.avallabel = Label(self.gridframe, textvariable=self.agev,
                               font=('Helvetica',12),bg=self.bgcolor)\
                               .grid(row=4,column=6, rowspan=2)
        #Date Joined
        self.datejoinv = StringVar()
        self.djlabel = Label(self.gridframe, text="Date Joined:",
                             font=('Helvetica',12), bg=self.bgcolor)\
                             .grid(row=4,column=7,rowspan=2, sticky=E)
        self.djEntry = Entry(self.gridframe, textvariable=self.datejoinv,
                             bd=4).grid(row=4, column=8, rowspan=2)
        #Extra blank label
        #self.blankLab = Label(self.gridframe, text=' ',font=('Helvetica',10),
        #                      bg=self.bgcolor).grid(row=5,column=3,sticky=E)
        #VISIT INFORMATION SETUP
        self.thirdSep = ttk.Separator(self.gridframe, orient='horizontal')\
                        .grid(row=6,column=3,columnspan=40,sticky=EW)
        self.vilabel = Label(self.gridframe,text='Visit Information',
                             font=("Helvetica", 16),fg='white', bg='gray10')\
                             .grid(row=6,column=3,columnspan=12, sticky=W)
        # self.viblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
        #                      bg=self.bgcolor).grid(row=7,column=3,sticky=E)
        self.datelab = Label(self.gridframe, text='Date: ',
                             font=('Helvetica',14), bg=self.bgcolor)\
                             .grid(row=7,column=3)
        self.notelab = Label(self.gridframe, text='Notes:',
                             font=('Helvetica',14), bg=self.bgcolor)\
                             .grid(row=7,column=4)
        self.vislab = Label(self.gridframe, text='Visitor: ',
                            font=('Helvetica',14),bg=self.bgcolor)\
                            .grid(row=7,column=7, padx=10)
        self.vollab = Label(self.gridframe, text='Volunteer: ',
                            font=('Helvetica',14),bg=self.bgcolor)\
                            .grid(row=9, column=7, padx=10)
        #self.viblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
        #                     bg=self.bgcolor).grid(row=23,column=0,sticky=E)
        #visit date
        #This listbox contains three dates
        #CHANGE FOCUS TO FIRST VISIT DATE
        #
        self.visit_listbox = Listbox(self.gridframe,height=4,width=15,font=12, bd=4)
        self.visit_listbox.bind('<<ListboxSelect>>', self.displayVisit)
        self.visit_listbox.config(exportselection=0)
        self.visit_scroll = Scrollbar(self.gridframe)
        self.visit_listbox.config(yscrollcommand=self.visit_scroll.set)
        self.visit_scroll.config(command=self.visit_listbox.yview)
        self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
        self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
        #Entry box for visit (when new visit is added)
        self.visdatev = StringVar()
        self.visitdate = Entry(self.gridframe,textvariable=self.visdatev,bd=4)
        #self.visitdate.grid(row=8, column=3)
        #visit notes
        self.notv = StringVar()
        self.notescv = Text(self.gridframe, state='disabled', width=50, height=4, bd=4, font='Helvetica')
        self.vnotes_scroll = Scrollbar(self.gridframe)
        self.notescv.config(yscrollcommand=self.vnotes_scroll.set)
        self.vnotes_scroll.config(command=self.notescv.yview)
        #visit notes
        #self.notescv = Entry(self.gridframe, textvariable=self.notv,
        #                     width=60,bd=4)
        self.notescv.grid(row=8, column=4, columnspan=3, rowspan=4, sticky=W, padx=10)
        self.vnotes_scroll.grid(row=8, column=4, rowspan=4, columnspan=3, sticky=E+N+S)
        #visit visitor
        self.visv = StringVar()
        # NOTE(review): self.visitor is assigned twice; the Label is
        # immediately replaced by the readonly Entry below.
        self.visitor = Label(self.gridframe,textvariable=self.visv, bd=4,
                             font=('Helvetica',10),bg=self.bgcolor)
        self.visitor = Entry(self.gridframe,textvariable=self.visv,
                             state='readonly',bd=4)
        self.visitor.grid(row=8, column=7, rowspan=1, sticky=E, padx=10)
        #visit volunteer
        #SET TO READONLY!!!
        self.volv = IntVar()
        self.volun = Entry(self.gridframe,textvariable=self.volv,bd=4)
        self.volun.grid(row=10, column=7, rowspan=1, padx=10)
        """
        #visit volunteer
        self.volv = IntVar()
        self.volun = Entry(self.gridframe,textvariable=self.volv,width=8,bd=4)
        self.volun.grid(row=9, column=8, rowspan=2)
        """
        #Extra blank label
        self.blankLab2 = Label(self.gridframe, text=' ',
                               font=('Helvetica',10), bg=self.bgcolor)\
                               .grid(row=13,column=3, rowspan=2, sticky=E)
        #Visit buttons
        self.newVisit = Button(self.gridframe, text='New Visit', width=15,
                               command=self.newvisitf)
        self.newVisit.grid(row=8, column=8, sticky=W)
        self.editVisit = Button(self.gridframe, text='Edit Visit', width=15,
                                command=self.editvisitf)
        self.editVisit.grid(row=9, column=8, sticky=W)
        self.deleteVisit = Button(self.gridframe, text='Delete Visit', width=15,
                                  command=self.deletevisitf)
        self.deleteVisit.grid(row=10, column=8, sticky=W)
        #records/updates visit
        self.saveVisit = Button(self.gridframe, text='Save Visit', width=15,
                                command=self.recordVisit)
        self.saveVisitE = Button(self.gridframe, text='Save Visit', width=15,
                                 command=self.savevisitf)
        #self.saveVisit.grid(row=8,column=8,sticky=W)
        self.cancelVisit = Button(self.gridframe, text='Cancel', width=15,
                                  command=self.cancelvisitf)
        #self.cancelVisit.grid(row=9, column=8, sticky=W)
        #HOUSEHOLD INFORMATION SETUP
        self.fourthSep = ttk.Separator(self.gridframe, orient='horizontal')\
                         .grid(row=15,column=3,columnspan=40,sticky=EW)
        self.hilabel = Label(self.gridframe,text='Household Information',
                             font=("Helvetica", 16),fg='white', bg='gray10')\
                             .grid(row=15,column=3,columnspan=12, sticky=W)
        #blank line
        self.hiblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
                             bg=self.bgcolor).grid(row=16,column=3,sticky=E)
        #street address
        self.adv = StringVar()
        self.adlab = Label(self.gridframe, text='Address: ',
                           font=('Helvetica',12), bg=self.bgcolor)\
                           .grid(row=17,column=3, rowspan=2, sticky=E)
        self.address = Entry(self.gridframe,textvariable=self.adv,
                             width=40,bd=4)
        self.address.grid(row=17, column=4,columnspan=2, rowspan=2)
        #apartment
        self.apv = StringVar()
        self.aplab = Label(self.gridframe, text='Apt: ',font=('Helvetica',12),
                           bg=self.bgcolor).grid(row=17,column=6,
                           rowspan=2, sticky=E)
        self.aptn = Entry(self.gridframe,textvariable=self.apv,width=10,bd=4)
        self.aptn.grid(row=17,column=7, rowspan=2, sticky=W)
        #city
        self.ctyv = StringVar()
        # NOTE(review): cilab is a local name, not an attribute, unlike the
        # other labels in this section.
        cilab = Label(self.gridframe, text='City: ',font=('Helvetica',12),
                      bg=self.bgcolor).grid(row=17,column=8, rowspan=2, sticky=E)
        self.city = Entry(self.gridframe,textvariable=self.ctyv,bd=4)
        self.city.grid(row=17,column=9, rowspan=2, sticky=W)#, pady=10)
        #Label(self.gridframe, text=' ',bg=self.bgcolor).grid(row=18, pady=5)
        #state
        self.stav = StringVar()
        self.stlab = Label(self.gridframe, text='State: ',
                           font=('Helvetica',12), bg=self.bgcolor)\
                           .grid(row=20,column=3, rowspan=2, sticky=E)
        self.state = Entry(self.gridframe,textvariable=self.stav,bd=4)
        self.state.grid(row=20,column=4, rowspan=2)
        #zip
        self.zpv = StringVar()
        self.zilab = Label(self.gridframe, text='Zip Code: ',font=('Helvetica',12),
                           bg=self.bgcolor).grid(row=20, column=5, rowspan=2, sticky=E)
        self.zipc = Entry(self.gridframe,textvariable=self.zpv,bd=4)
        self.zipc.grid(row=20, column=6, rowspan=2)
        #Date Verified
        self.dverilabel = Label(self.gridframe, text='Last Verified: ',
                                font=('Helvetica',12),bg=self.bgcolor)\
                                .grid(row=20,column=7, rowspan=2, sticky=E)
        self.mvv = StringVar()
        self.dvv = StringVar()
        self.yvv = StringVar()
        self.mvv.set("")
        self.dvv.set("")
        self.yvv.set("")
        #for month entry
        self.mov = ttk.Combobox(self.gridframe, width=10, state='readonly',
                                values=self.month_li, textvariable=self.mvv)
        #self.mob.bind('<<ComboboxSelected>>', self.monthbox_select)
        #for day entry
        self.dov = Spinbox(self.gridframe, from_=0, to=0,
                           textvariable=self.dvv, width=5, bd=4)
        #for year entry
        self.yov = Spinbox(self.gridframe, from_=1900, to=2500,
                           textvariable=self.yvv, width=9, bd=4)
        self.mov.grid(row=20, column=8, rowspan=2, sticky=E, padx=10)
        self.dov.grid(row=20, column=9, columnspan=2, rowspan=2, padx=10, sticky=W)
        self.yov.grid(row=20, column=10, rowspan=2, padx=10, sticky=W)
        #formatting labels/objects
        self.blankLab5 = Label(self.gridframe, text=' ',
                               font=('Helvetica',12), bg=self.bgcolor)\
                               .grid(row=23,column=3,sticky=E)
        self.blankLab6 = Label(self.gridframe, text=' ',
                               font=('Helvetica',10), bg=self.bgcolor)\
                               .grid(row=25,column=3,sticky=E)
        self.fifthsep = ttk.Separator(self.gridframe, orient='horizontal')\
                        .grid(row=27,column=3,columnspan=40,sticky=EW, pady=10)
        #The following variables will be removed and re-gridded
        #as the function of the interface changes.
        #
        #HOUSEHOLD MEMBERS SETUP
        #These variables appear on the updateClientDisplay only
        #
        #info display widgets
        self.adl = StringVar()
        self.dispad = Label(self.gridframe,textvariable=self.adl,
                            font=('Helvetica',12),bg=self.bgcolor)
        self.chil = StringVar()
        self.dischil = Label(self.gridframe,textvariable=self.chil,
                             font=('Helvetica',12),bg=self.bgcolor)
        self.sen = StringVar()
        self.dissen = Label(self.gridframe,textvariable=self.sen,
                            font=('Helvetica',12),bg=self.bgcolor)
        self.inf = StringVar()
        self.disinf = Label(self.gridframe,textvariable=self.inf,
                            font=('Helvetica',12),bg=self.bgcolor)
        self.tot = StringVar()
        self.distot = Label(self.gridframe, textvariable=self.tot,
                            bg=self.bgcolor,font=('Helvetica',12))
        self.houseSep = ttk.Separator(self.gridframe, orient='horizontal')
        self.houseSep.grid(row=23,column=3,columnspan=40,sticky=EW)
        self.housetitle = Label(self.gridframe,text='Household Members',
                                font=("Helvetica", 16),fg='white',bg='gray10')
        self.housetitle.grid(row=23,column=3,columnspan=12, sticky=W)
        #listbox of family members
        self.family_listbox = Listbox(self.gridframe,height=5,width=30,font=12)
        self.family_listbox.config(exportselection=0)
        self.fam_scroll = Scrollbar(self.gridframe)
        self.family_listbox.config(yscrollcommand=self.fam_scroll.set)
        self.fam_scroll.config(command=self.family_listbox.yview)
        self.family_listbox.grid(row=24, column=3, rowspan=3, columnspan=2, sticky=W)
        self.fam_scroll.grid(row=24, column=4, rowspan=3, columnspan=1, sticky=E+N+S)
        #family member buttons
        self.addmemb = Button(self.gridframe, text='Add Member', width=14,
                              command=self.addMemberEntryBoxes)
        self.addmemb.grid(row=24,column=5,sticky=E+N+S)
        self.removmemb = Button(self.gridframe, text='Remove Member',width=14,
                                command=self.removeMemberConfirm)
        self.removmemb.grid(row=25,column=5,sticky=E+N+S)
        self.viewmemb = Button(self.gridframe, text='View Member',width=14,
                               command=self.runViewMember)
        self.viewmemb.grid(row=26,column=5,sticky=E+N+S)
        #update save/cancel buttons
        self.saveB = Button(self.gridframe, text='Save Changes',
                            command=self.updateInfo,width=20)
        self.saveB.grid(row=28, column=3, columnspan=2)
        self.cancelB = Button(self.gridframe, text='Cancel Changes',
                              command=self.cancel_changes,width=20)
        self.cancelB.grid(row=28, column=5, columnspan=2)
        #NEW CLIENT DISPLAY WIDGETS
        #These variables appear on the newClientDisplay only
        #
        self.addhhsep = ttk.Separator(self.gridframe, orient='horizontal')
        self.addhhtitle = Label(self.gridframe,text='Add Household Members',
                                font=("Helvetica", 16),fg='white',bg='gray10')
        #add members to new household variable
        self.q = IntVar()
        self.famNum = Entry(self.gridframe, textvariable=self.q)
        self.entNum = Label(self.gridframe,
                            text='Total Family Members: ',
                            font=('Helvetica',10),bg=self.bgcolor)
        self.famname = Label(self.gridframe, text='Name:',
                             font=('Helvetica',10),bg=self.bgcolor)
        self.famfn = Label(self.gridframe, text='First Name:',
                           font=('Helvetica',10),bg=self.bgcolor)
        self.famln = Label(self.gridframe, text='Last Name:',
                           font=('Helvetica',10),bg=self.bgcolor)
        self.famdob = Label(self.gridframe, text='Date of Birth:',
                            font=('Helvetica',10),bg=self.bgcolor)
        self.famphone = Label(self.gridframe, text='Phone',
                              font=('Helvetica',10),bg=self.bgcolor)
        self.fammon = Label(self.gridframe,text='mm',
                            font=('Helvetica',10),bg=self.bgcolor)
        self.famday = Label(self.gridframe,text='dd',
                            font=('Helvetica',10),bg=self.bgcolor)
        self.famyear = Label(self.gridframe,text='yyyy',
                             font=('Helvetica',10),bg=self.bgcolor)
        self.newMembersB = Button(self.gridframe, text='Add Members',
                                  command=self.familyEntryBoxes)
        self.newClientSave = Button(self.gridframe, text='Save Client',
                                    command=self.addNew)
        self.cancelNewB = Button(self.gridframe, text='Cancel New Entry',
                                 command=self.updateClientDisplay)
        #Lists of all variables/gridded things
        """
        self.updateDisplay = [self.adultsLabel, self.childrenLabel, self.seniorsLabel,
                              self.infantsLabel, self.totalLabel, self.dispad,
                              self.dischil, self.dissen, self.disinf, self.distot,
                              self.houseSep, self.housetitle, self.family_listbox,
                              self.fam_scroll, self.addmemb, self.removmemb, self.viewmemb,
                              self.saveB, self.cancelB]
        self.newDisplay = [self.famNum, self.addhhsep, self.addhhtitle, self.entNum,
                           self.famname, self.famfn, self.famln, self.famdob,
                           self.famphone, self.fammon, self.famday, self.famyear,
                           self.newMembersB, self.newClientSave, self.cancelNewB]
        self.EverythingElse = [self.cslabel, self.csblank, self.nameSearchEnt,
                               self.searchButton, self.client_listbox, self.scrollb,
                               self.firstSep, self.NCButton, self.secondSep, self.cilabel,
                               self.ciblank, self.fnlabel, self.fname, self.lnlabel,
                               self.lname, self.phlabel, self.phone, self.doblabel,
                               self.mob, self.dob, self.yob, self.alabel, self.avallabel,
                               self.djlabel, self.djEntry, self.thirdSep, self.vilabel,
                               self.viblank, self.datelab, self.notelab, self.vislab,
                               #added next line of objects
                               self.visit_listbox, self.visit_scroll, self.vnotes_scroll,
                               self.vollab, self.viblank, self.visitdate, self.notescv,
                               self.visitor, self.volun, self.blankLab2, self.visButton,
                               self.allvisButton, self.fourthSep, self.hilabel,
                               self.hiblank, self.adlab, self.address, self.aplab,
                               self.aptn, cilab, self.city, self.stlab, self.state,
                               self.zilab, self.zipc, self.dverilabel, self.mov,
                               self.dov, self.yov, self.vLab1, self.vLab2, self.blankLab5,
                               self.blankLab6, self.blankLab7, self.fifthsep,
                               self.blankLab8]
        self.allvars = [self.ns, self.fnv, self.lnv, self.phv, self.mv, self.dv,
                        self.yv, self.agev, self.datejoinv, self.visdatev,
                        self.notv, self.visv, self.volv, self.adv, self.apv,
                        self.ctyv, self.stav, self.zpv, self.mvv, self.dvv, self.yvv,
                        self.adl, self.chil, self.sen, self.inf, self.tot, self.q]
        """
        #MENU SETUP
        self.menubar = Menu(self.ciGui)
        #^Essentially re-selects client
        self.volmenu = Menu(self.menubar, tearoff=0)
        self.volmenu.add_command(label='Account Settings')
        self.volmenu.add_command(label='Log Off', command=self.logoff)
        self.volmenu.add_command(label='Configure Color', command=self.configure_background)
        self.menubar.add_cascade(label='Volunteers',menu=self.volmenu)
        self.optionsmenu = Menu(self.menubar,tearoff=0)
        self.optionsmenu.add_command(label='Quit', command=self.quitprogram)
        #self.optionsmenu.add_command(label='Log Off', command=self.logoff)
        #self.optionsmenu.add_command(label='Account Settings')
        #self.optionsmenu.add_command(label='Configure Color', command=self.configure_background)
        self.menubar.add_cascade(label='Options',menu=self.optionsmenu)
        #Reports Menu
        self.reportmenu = Menu(self.menubar,tearoff=0)
        self.reportmenu.add_command(label='View Weekly Report',
                                    command=self.weeklyReport)
        self.reportmenu.add_command(label='View Monthly Report',
                                    command=self.monthlyReport)
        self.reportmenu.add_command(label='View Yearly Report',
                                    command=self.yearlyReport)
        self.menubar.add_cascade(label='Reports',menu=self.reportmenu)
        ###infomenu not defined ERROR
        #self.infomenu.add_command(label='View Households')#list_households
        #self.infomenu.add_command(label='View Visits')#list_vis
        #self.infomenu.add_command(label='View Volunteers')
        #list_active_volunteers, list_all_volunteers, reactivate,remove
        #self.menubar.add_cascade(label='Records',menu=self.infomenu)
        self.ciGui.config(menu=self.menubar)
        #instructive labels
        #self.instructions = Text(self.gridframe, bd=4, width=20)
        #self.instructions.grid(row=14, column=0, rowspan=20, columnspan=2,padx=10)
        #self.instructions.insert('1.0', "Questions to Ask:")
        #self.instructions.insert("Has anything changed in your family?")
        #Sets some sizing stuff
        for i in range(0, 10):
            self.ciGui.columnconfigure(i, weight=1, minsize=10)
        for i in range(0, 30):
            self.ciGui.rowconfigure(i, weight=1, minsize=10)
        self.ciGui.rowconfigure(18, weight=1, minsize=25)
        #mainloop
        self.ciGui.mainloop()
#DISPLAY SCREENS
def newClientDisplay(self):
"""This function will clear all irrelevant widgets, and
grid all widgets necessary for the new client screen.
"""
#clear widgets
self.clearEntries()
#grid widgets
self.addhhsep.grid(row=23,column=3,columnspan=40,sticky=EW, pady=10)
self.addhhtitle.grid(row=23,column=3,columnspan=12, sticky=W, pady=10)
self.famNum.grid(row=24, column=4)
self.entNum.grid(row=24, column=3)
self.newMembersB.grid(row=24, column=5)
self.newClientSave.grid(row=40,column=3, columnspan=2)
self.cancelNewB.grid(row=40, column=5, columnspan=2)
self.newvisitf()
self.saveVisit.grid_forget()
self.cancelVisit.grid_forget()
"""
self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
self.newVisit.grid(row=8, column=8, sticky=W)
self.editVisit.grid(row=9, column=8, sticky=W)
self.deleteVisit.grid(row=10, column=8, sticky=W)
"""
return
def updateClientDisplay(self):
"""This function will clear all irrelevant widgets and
grid all widgets necessary for the updating-client screen.
"""
#clear widgets
self.clearEntries()
#grid widgets
self.family_listbox.grid(row=24, column=3, rowspan=3, columnspan=2, sticky=W)
self.fam_scroll.grid(row=24, column=4, rowspan=3, columnspan=1, sticky=E+N+S)
self.addmemb.grid(row=24,column=5,sticky=E+N+S)
self.removmemb.grid(row=25,column=5,sticky=E+N+S)
self.viewmemb.grid(row=26,column=5,sticky=E+N+S)
self.housetitle.grid(row=23,column=3,columnspan=12, sticky=W)
self.houseSep.grid(row=23,column=3,columnspan=40,sticky=EW)
self.saveB.grid(row=28, column=3, columnspan=2)
self.cancelB.grid(row=28, column=5, columnspan=2)
return
#DISPLAY FOR SELECTED CLIENTS
def displayInfo(self, *args):
"""This function displays the information for a client that
has been selected in the client_listbox.
"""
try:
self.cursel = int(self.id_list[self.client_listbox.curselection()[0]])
info = select_client(self.cursel)
self.info = info
self.updateClientDisplay()
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
except IndexError:
pass
return
def displayNewInfo(self, client_id):
"""This function displays the information for a specified
client whose id is client_id.
"""
cursel = client_id
info = select_client(cursel)
self.info = info
self.updateClientDisplay()
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
return
#DISPLAY INFORMATION FUNCTIONS
def displayClientInfo(self, info, *args):
"""This function displays the client information.
"""
#retrieve info from dictionary
visitor = info["visitor"]
#set variables
self.fnv.set(visitor.firstname)
self.lnv.set(visitor.lastname)
month = self.month_int[visitor.dob.month]
self.mv.set(month)
self.dv.set(visitor.dob.day)
self.yv.set(visitor.dob.year)
self.phv.set(visitor.phone)
#parse and set datejoined
joined = str(visitor.dateJoined.month) + "/" +\
str(visitor.dateJoined.day) + "/" +\
str(visitor.dateJoined.year)
self.datejoinv.set(joined)
#set age
ad=str(age(visitor.dob))
a="Age: "
ad=str(a+ad)
self.agev.set(ad)
return
def displayHouseholdInfo(self, info, *args):
"""This function displays the household information for
a client.
"""
#retrieve info from dictionary
house = info["household"]
#set variables
self.adv.set(house.street)
self.apv.set(house.apt)
self.ctyv.set(house.city)
self.stav.set(house.state)
self.zpv.set(house.zip)
#check dateVerified, and set variables accordingly
if house.dateVerified != None:
month = house.dateVerified.month
self.mvv.set(self.month_int[month])
self.dvv.set(house.dateVerified.day)
self.yvv.set(house.dateVerified.year)
#parse and set label variables for all members
ad=str(info["agegroup_dict"]["adults"])
a="Adults: "
ad=str(a+ad)
self.adl.set(ad)
ch=str(info["agegroup_dict"]["children"])
c="Children: "
ch=c+ch
self.chil.set(ch)
sn=str(info["agegroup_dict"]["seniors"])
s="Seniors: "
sn=s+sn
self.sen.set(sn)
infa=str(info["agegroup_dict"]["infants"])
i="Infants: "
infa=i+infa
self.inf.set(infa)
#instead of appending like this, we'll be drawing
#the total right out of the database.
#
tl=int(info["agegroup_dict"]["adults"])+\
int(info["agegroup_dict"]["children"])+\
int(info["agegroup_dict"]["seniors"])+\
int(info["agegroup_dict"]["infants"])
tl=str(tl)
t='Total: '
tl=t+tl
self.tot.set(tl)
#grid family member labels
self.dispad.grid(row=22,column=3,sticky=W, pady=10)
self.dischil.grid(row=22,column=4,sticky=W)
self.dissen.grid(row=22,column=5,sticky=W)
self.disinf.grid(row=22,column=6,sticky=W)
self.distot.grid(row=22,column=7,sticky=W)
return
def displayVisitInfo(self, info, *args):
"""This function display the visit information for a client.
"""
self.clearVisits()
self.visitDict = {}
visitor = info["visitor"]
name = str(visitor.firstname)+ " " +str(visitor.lastname)
self.visv.set(name)
#visit info
visits = info["visit_list"]
if len(visits) == 0:
pass
else:
vdatelabs = []
vnlabs = []
vvisitors = []
vvols = []
vids = []
for v in visits:
d=str(v.date.month)+'/'+str(v.date.day)+'/'+str(v.date.year)
n=v.notes
vi=v.visitor
vol=v.volunteer
vid=v.visitID
vdatelabs.append(d)
vnlabs.append(n)
vvisitors.append(vi)
vvols.append(vol)
vids.append(vid)
#set variables to display first visit
self.visv.set(vvisitors[0])
self.volv.set(vvols[0])
self.notv.set(vnlabs[0])
self.notescv.config(state='normal')
self.notescv.insert('1.0', vnlabs[0])
self.notescv.config(state='disabled')
#save lists in dictionary
self.visitDict['dates'] = vdatelabs
self.visitDict['notes'] = vnlabs
self.visitDict['visitors'] = vvisitors
self.visitDict['volunteers'] = vvols
self.visitDict['ids'] = vids
for i in range(0, len(vdatelabs)):
self.visit_listbox.insert(i, vdatelabs[i])
self.visit_listbox.selection_set(0)
def displayVisit(self, *args):
"""This function will display the data for a visit when
a visit date is selected.
"""
try:
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
datev = int(self.visit_listbox.curselection()[0])
self.selectedVisit = datev
n = self.visitDict['notes']
vi = self.visitDict['visitors']
vol = self.visitDict['volunteers']
self.visv.set(vi[datev])
self.volv.set(vol[datev])
self.notv.set(n[datev])
notes = str(self.notv.get())
self.notescv.insert('1.0', notes)
self.notescv.config(state='disabled')
except IndexError:
pass
def displayHouseholdMem(self, info, *args):
"""This function displays the household information for a client.
"""
self.family_listbox.delete(0,END)
a=[]
del self.mem_list[:]
for member in info["member_list"]:
self.mem_list.append(member.id)
s=str(age(member.dob))
q='Age: '
s=q+s
x=(member.firstname, member.lastname,s)
a.append(x)
for i in range(len(a)):
self.family_listbox.insert(i,a[i])
#DISPLAY EXTRA ENTRY BOXES FOR ADDITIONAL FAMILY MEMBERS
#BUG: WHEN Add Member IS PRESSED MORE THAN ONCE, EXTRA
#BOXES HANG AROUND, AND ARE NEVER CLEARED
def familyEntryBoxes(self, *args):
"""This function generates entry boxes for adding new family members.
The entry boxes are saved in list form and added to the dictionary
memDict.
"""
#clears any boxes already displayed
self.clearFamily()
try:
n = int(self.q.get())
except ValueError:
return
#add instructive labels to grid
self.famfn.grid(row=25,column=3)
self.famln.grid(row=25,column=4)
self.famdob.grid(row=25,column=5)
self.famphone.grid(row=25,column=8)
#create lists
fnames = []
lnames = []
mm = []
dd = []
yy = []
phnum = []
#create entry boxes, grid them, and append them to a list
for i in range(0, n):
fname = Entry(self.gridframe)
fname.grid(row=26+i, column=3)
fnames.append(fname)
lname = Entry(self.gridframe)
lname.grid(row=26+i, column=4)
lnames.append(lname)
month = ttk.Combobox(self.gridframe, width=12, state='readonly',
values=self.month_li)
#month.bind('<<ComboboxSelected>>', self.monthbox_select)
month.grid(row=26+i, column=5)
mm.append(month)
day = Spinbox(self.gridframe, from_=0, to=0, width=5)
day.grid(row=26+i, column=6)
dd.append(day)
year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
year.grid(row=26+i, column=7)
yy.append(year)
phone = Entry(self.gridframe)
phone.grid(row=26+i, column=8)
phnum.append(phone)
#add all lists to dictionary
self.memDict["first"] = fnames
self.memDict["last"] = lnames
self.memDict["mm"] = mm
self.memDict["dd"] = dd
self.memDict["yy"] = yy
self.memDict["phone"] = phnum
def addMemberEntryBoxes(self, *args):
"""This function generates entry boxes for adding new family members.
The entry boxes are saved in list form and added to the dictionary
memDict.
"""
if self.addmemberON == True:
pass
else:
#add instructive labels to grid
self.famfn.grid(row=24,column=6) #, sticky=NE)
self.famln.grid(row=24,column=8) #, sticky=NE)
self.famdob.grid(row=25,column=6)
self.famphone.grid(row=26,column=6)
#create entry boxes, grid them, and append them to a list
#first name
self.fname = Entry(self.gridframe)
self.fname.grid(row=24, column=7, sticky=W)
self.memDict["first"]=[self.fname]
#last name
self.lname = Entry(self.gridframe)
self.lname.grid(row=24, column=9, sticky=W)
self.memDict["last"]=[self.lname]
#dob: month
self.month = ttk.Combobox(self.gridframe, width=12, state='readonly',
values=self.month_li)
#self.month.bind('<<ComboboxSelected>>', self.monthbox_select)
self.month.grid(row=25, column=7, sticky=W)
self.memDict["mm"]=[self.month]
#dob: day
self.day = Spinbox(self.gridframe, from_=0, to=0, width=5)
self.day.grid(row=25, column=8, sticky=W)
self.memDict["dd"]=[self.day]
#dob: year
self.year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
self.year.grid(row=25, column=9, sticky=W)
self.memDict["yy"]=[self.year]
#phone
self.phone = Entry(self.gridframe)
self.phone.grid(row=26, column=7, sticky=W)
self.memDict["phone"]=[self.phone]
#self.addmemberON = True
#CLEAR WIDGETS FUNCTIONS
def clearVisits(self):
"""This function clears the entry boxes/visit notes
used for visits.
"""
self.visit_listbox.delete(0, END)
self.visv.set("")
self.volv.set("")
self.notv.set("")
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
self.notescv.config(state='disabled')
visitob = [self.visit_listbox, self.visit_scroll, self.visitdate,
self.newVisit, self.editVisit, self.deleteVisit,
self.saveVisit, self.saveVisitE, self.cancelVisit]
for ob in visitob:
ob.grid_forget()
self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
self.newVisit.grid(row=8, column=8, sticky=W)
self.editVisit.grid(row=9, column=8, sticky=W)
self.deleteVisit.grid(row=10, column=8, sticky=W)
def clearFamily(self):
#forgets additional family members
self.family_listbox.delete(0, END)
try:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
easylist = [mfname, mlname, mm, dd,
yy, phnum]
for i in range(0, 6):
for j in range(0, len(easylist[i])):
easylist[i][j].grid_forget()
for i in range(0, 6):
easylist[i] = []
self.memDict = {}
except KeyError:
pass
    def clearEntries(self):
        """This function clears the entry boxes that will never be
        removed from the display, resets defaulted fields, and forgets
        the widgets used only by the new-client workflow.
        """
        allvaries = [self.fnv, self.lnv, self.phv, self.mv, self.dv, self.yv,
                     self.adv, self.apv, self.q, self.agev,
                     self.notv, self.volv, self.visv, self.adl, self.chil,
                     self.sen, self.inf, self.tot, self.datejoinv, self.mvv,
                     self.dvv, self.yvv]
        #Clears the entryboxes
        for i in range(0, len(allvaries)):
            allvaries[i].set("")
        #sets defaulted entries: date joined defaults to today,
        #city/state/zip to the local defaults
        today = datetime.now()
        todaystr = str(today.month)+'/'+str(today.day)+\
                   '/'+str(today.year)
        #self.visdatev.set(todaystr)
        self.datejoinv.set(todaystr)
        self.ctyv.set("Troy")
        self.stav.set("NY")
        self.zpv.set(12180)
        #new client stuff: widgets shown only while adding a new household
        allforgets = [self.family_listbox,
                      self.fam_scroll, self.addmemb, self.removmemb,
                      self.viewmemb, self.housetitle, self.houseSep, self.saveB,
                      self.cancelB, self.dispad, self.dischil, self.dissen,
                      self.disinf, self.distot, self.addhhsep, self.addhhtitle,
                      self.famNum, self.entNum, self.newMembersB,
                      self.newClientSave, self.cancelNewB, self.famname,
                      self.famfn, self.famln, self.famdob, self.famphone,
                      self.fammon, self.famday, self.famyear]
        # Widgets may be managed by either pack() or grid(); forget both.
        for i in range(0, len(allforgets)):
            allforgets[i].forget()
            allforgets[i].grid_forget()
        #forgets additional family members
        #self.family_listbox.delete(0, END)
        self.clearFamily()
        #forgets previous visit notes
        self.clearVisits()
        self.visitDict = {}
def monthbox_select(self, *args):
"""This function is called when a month is selected from the
month combobox. It will look up the month in the month_day_dict,
and assign the right number of days to the "dob" spinbox.
"""
month = self.mv.get()
days = self.month_day_dict[month]
self.dob.config(from_=1, to=days)
return
#visit buttons
    def newvisitf(self):
        """Switch the visit panel into "new visit" mode.

        Hides the visit list and its New/Edit/Delete buttons, shows a
        date entry defaulted to today, clears the notes box for typing,
        and grids the Save/Cancel buttons.
        """
        #clear Notes, Vol, & Visitor
        self.visit_listbox.grid_forget()
        self.visit_scroll.grid_forget()
        self.newVisit.grid_forget()
        self.editVisit.grid_forget()
        self.deleteVisit.grid_forget()
        #set date of visit to today
        today = datetime.now()
        tstr = str(today.month) + "/" + str(today.day) + "/" + str(today.year)
        self.visdatev.set(tstr)
        self.visitdate.grid(row=8, column=3)
        #prepopulate volunteer
        #prepopulate visitor (add test to see if this exists, in case of newclient)
        self.notescv.config(state='normal')
        self.notescv.delete('1.0', END)
        self.saveVisit.grid(row=8, column=8, sticky=W)
        self.cancelVisit.grid(row=9, column=8, sticky=W)
    def editvisitf(self):
        """This function enables the user to edit
        a visit: hides the visit list, loads the selected visit's
        volunteer/visitor/date into the entry variables, and unlocks
        the notes box with Save/Cancel buttons.
        """
        #gridding: hide the list view while editing
        self.visit_listbox.grid_forget()
        self.visit_scroll.grid_forget()
        self.newVisit.grid_forget()
        self.editVisit.grid_forget()
        self.deleteVisit.grid_forget()
        # Pre-fill the entry variables from the currently selected visit.
        self.volv.set(self.visitDict['volunteers'][self.selectedVisit])
        self.visv.set(self.visitDict['visitors'][self.selectedVisit])
        vdate = self.visitDict['dates'][self.selectedVisit]
        self.visdatev.set(vdate)
        #set volunteer from database
        #set visitor from database
        #set visdatev to Visit Date from database
        self.visitdate.grid(row=8, column=3)
        self.notescv.config(state='normal')
        self.saveVisitE.grid(row=8, column=8, sticky=W)
        self.cancelVisit.grid(row=9, column=8, sticky=W)
def cancelvisitf(self):
"""this will cancel a visit/changes to a visit"""
self.clearVisits()
d = self.visitDict["dates"]
for i in range(0, len(d)):
self.visit_listbox.insert(i, d[i])
self.visit_listbox.selection_set(0)
self.displayVisit()
def savevisitf(self):
"""BUG: oops! It's adding a new visit?"""
"""this will connect to Update Visit"""
try:
notes = str(self.notescv.get('1.0', END))
d = str(self.visdatev.get())
da = d.split('/')
dat = date(month=int(da[0]), day=int(da[1]), year=int(da[2]))
except:
self.error_popup("Check the visit date!")
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
update_vis(vid, dat, notes)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
def deletevisitf(self):
"""this will connect to delete visit"""
conf = messagebox.askquestion(
title='Confirm Delete',
message='Are you sure you want to delete this visit?')
if conf == 'yes':
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
remove_visit(vid)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
return
else:
return
    def allVisits(self):
        """This function displays all of a client's
        visits in a separate window.
        """
        # TODO: not implemented yet.
        pass
    def cancel_changes(self):
        """This function will clear the display and reselect
        the client's information, discarding any unsaved edits.
        """
        self.updateClientDisplay()
        self.displayInfo()
        return
    def quitprogram(self):
        """Close the database session and destroy the main window."""
        quit_session()
        self.ciGui.destroy()
        return
def logoff(self):
import logGui
self.ciGui.destory()
return
    def monthlyReport(self):
        """Generate the monthly activity report."""
        generate_monthly_report()
        return
    def yearlyReport(self):
        """Generate the yearly activity report."""
        generate_yearly_report()
        return
    def weeklyReport(self):
        """Generate the weekly activity report."""
        generate_weekly_report()
        return
    def volunteerInfo(self):
        # TODO: not implemented yet.
        return
def error_popup(self, errmessage):
"""This function implements a simple pop-up window to warn user
about bad data entry.
"""
conf = messagebox.showerror(title='Error', message=errmessage)
    def recordVisit(self):
        """This function will insert a new visit, clear old visit
        display info, and reset the visit display.
        """
        #inserts new visit: volunteer id must be an integer
        try:
            vol_id = int(self.volv.get())
        except ValueError:
            self.error_popup("Check volunteer id")
            return
        #get visit date, entered as MM/DD/YYYY
        try:
            dv = (str(self.visdatev.get())).split('/')
            dvm = int(dv[0])
            dvd = int(dv[1])
            dvy = int(dv[2])
            vdate = date(year=dvy, month=dvm, day=dvd)
        except ValueError:
            self.error_popup("Check visit date field!\n Enter: MM/DD/YYYY")
            return
        #get visit notes from the Text widget
        try:
            note = self.notescv.get("1.0", END)
        except ValueError:
            self.error_popup("Uh, oh! Better check the visit info!")
            return
        visitInfo = visitData(vol_id, visitDate=vdate, notes=note)
        new_visit(self.cursel, visitInfo)
        #clears old visit notes
        self.clearVisits()
        #refreshes visit note display
        info = select_client(self.cursel)
        self.displayVisitInfo(info)
#"Get All Input and Test It" functions
def getVisitorInput(self, ctype, cID=None):
"""This function tests all of the data for the visitor
entry boxes and returns an object.
"""
#Error checking for visitor's name and phone
try:
fname = str(self.fnv.get())
except ValueError:
self.error_popup("Check visitor's first name!")
return
try:
lname = str(self.lnv.get())
except ValueError:
self.error_popup("Check visitor's last name!")
return
try:
phnum = str(self.phv.get())
except ValueError:
self.error_popup("Check visitor's phone number!")
return
#Error checking for visitor's DOB
try:
month = str(self.mv.get())
dm = self.int_month[month]
except ValueError and KeyError:
self.error_popup("Check visitor's month of birth!")
return
try:
dd = int(self.dv.get())
except ValueError:
self.error_popup("Check visitor's day of birth!")
return
try:
dy = int(self.yv.get())
except ValueError:
self.error_popup("Check visitor's year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dd)
except ValueError:
self.error_popup("Was an invalid day of birth chosen?")
return
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
if ctype == "old":
cd = oldClientData(cID, firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
elif ctype == "new":
cd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
return cd
def getMemberInput(self, clist):
"""This function tests all of the input data for members
entry boxes and returns a data object.
"""
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
#Check to see if any
if self.memDict != {}:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
for i in range(0, len(mfname)):
try:
fname = str(mfname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s first name!")
return
try:
lname = str(mlname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s last name!")
return
try:
phn = str(phnum[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s phone!")
return
try:
month = str(mm[i].get())
dm = self.int_month[month]
except ValueError and KeyError:
self.error_popup("Check family member "+str(i)\
+"'s month of birth!")
return
try:
dday = int(dd[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s day of birth!")
return
try:
dy = int(yy[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dday)
except ValueError:
self.error_popup("Was an invalid day of birth chosen for"\
" family member "+str(i)+"?")
return
ncd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phn, dateJoined=datejoined)
clist.append(ncd)
return clist
def getHouseholdInput(self):
"""This function tests all input for households in the household
entry boxes, and returns a data object.
"""
#get street address
try:
streeta = str(self.adv.get())
except ValueError:
self.error_popup("Check street address!")
return
#get city
try:
citya = str(self.ctyv.get())
except ValueError:
self.error_popup("Check city!")
return
#get state
try:
statea = str(self.stav.get())
except ValueError:
self.error_popup("Check state!")
return
#get zip code
try:
zipa = int(self.zpv.get())
except ValueError:
self.error_popup("Check zip code!")
return
#get apartment number
try:
apta = str(self.apv.get())
except ValueError:
self.error_popup("Check apartment number!")
return
#get date verified
if self.mvv.get() == self.dvv.get() == self.yvv.get() == "":
datev = None
else:
#get month
try:
month = str(self.mvv.get())
vm = self.int_month[month]
except ValueError and KeyError:
self.error_popup("Check month of date verified!")
return
#get day
try:
vd = int(self.dvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#get year
try:
vy = int(self.yvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#final date testing
try:
datev = date(year=vy, month=vm, day=vd)
except ValueError:
self.error_popup("Was an invalid day for date"\
+" verified chosen?")
return
houseInfo = houseData(street=streeta, city=citya, state=statea,
zip=zipa, apt=apta, dateVerified=datev)
return houseInfo
def getVisitInput(self):
"""This function tests all visit input and returns an object.
"""
#IMPLEMENT get volunteer id
try:
v = str(self.visdatev.get())
vd = v.split('/')
vdate = date(year=int(vd[2]), month=int(vd[0]), day=int(vd[1]))
except ValueError:
self.error_popup("Check the visit date!")
try:
vnote = str(self.notv.get())
except ValueError:
vnote=None
visitInfo = visitData(Vol_ID=1, visitDate=vdate, notes=vnote)
return visitInfo
def addNew(self):
"""This function adds a new household to the database.
#NOTE: we need to check checkboxes for dummy addresses
#(domestic violence address, and homeless address)
"""
#Test all input and create newClientData object for visitor
cd = self.getVisitorInput("new")
clist = [cd]
newClientInfo_list = self.getMemberInput(clist)
houseInfo = self.getHouseholdInput()
visitInfo = self.getVisitInput()
#send all objects to new_household function
client_id = new_household(houseInfo, visitInfo, newClientInfo_list)
#refresh list of clients
self.clientlist = list_people()
#refresh screen
self.displayNewInfo(client_id)
def updateInfo(self, *args):
"""This function will update the visitor's information, the household
information, and the visit information. It will also add family members,
but it will NOT update the family members.
"""
sel_id = self.cursel
nclist = []
cd = self.getVisitorInput("old", cID=sel_id)
oldClientInfo_list = [cd]
houseInfo = self.getHouseholdInput()
newClientInfo_list = self.getMemberInput(nclist)
update_all(sel_id, houseInfo, oldClientInfo_list, newClientInfo_list)
#refresh list of clients
self.clientlist = list_people()
#refresh screen
#self.updateClientDisplay()
self.displayNewInfo(self.cursel)
def nameSearch(self, *args):
"""This function returns relevant results
"""
#removes old listbox contents
self.client_listbox.delete(0, END)
del self.id_list[:]
#get user input
name = str(self.ns.get())
nameC = name.capitalize()
#name = str(self.ns.get()).capitalize()
#NOTE:Get lowercase names, too
c = self.clientlist
#find matching names in list
found_clients = []
for i in range(len(c)):
if name in c[i][0] or nameC in c[i][0]:
found_clients.append(c[i])
found_clients.sort()
#listing just the names and addresses of the people
x=[]
for i in range(len(found_clients)):
dobstr=str(found_clients[i][1].month)+\
"/"+str(found_clients[i][1].day)+\
'/'+str(found_clients[i][1].year)
a=str(found_clients[i][0])+" --"+dobstr
x.append(a)
self.id_list.append(found_clients[i][2])
#insert results into listbox
for i in range(len(x)):
self.client_listbox.insert(i,x[i])
return
    def runViewMember(self):
        """This function displays the information for a client that
        has been selected in the family_listbox.
        """
        try:
            # curselection() is empty when nothing is selected; the
            # IndexError below makes that a silent no-op.
            n = self.family_listbox.curselection()[0]
            self.cursel = self.mem_list[n]
            info = select_client(self.cursel)
            self.displayHouseholdMem(info)
            self.displayVisitInfo(info)
            self.displayClientInfo(info)
            self.displayHouseholdInfo(info)
        except IndexError:
            pass
        return
def removeMemberConfirm(self):
n = self.family_listbox.curselection()[0]
tbd = self.mem_list[n]
conf = messagebox.askquestion(
title='Confirm Removal',
message='Are you sure you want to delete this client?')
if conf == 'yes':
remove_client(tbd)
self.updateInfo()
return
else:
return
def configure_background(self, *args):
"""This function takes in a string and, if it matches a
valid color, will set the color of the interface to
the new color.
"""
import tkinter.colorchooser as cc
color = cc.askcolor()
color_name = color[1]
self.bgcolor = color_name
#self.ciGui.update()
#QUESTION: How do we save the color after the program is
#closed?
#ANSWER: Save it in the database!
#pass
if __name__ == '__main__':
    # Launch the GUI when the module is run as a script.
    ao = allobjects()
|
ChristinaHammer/Client_Database
|
cdbgui15.py
|
Python
|
mit
| 63,159
|
[
"VisIt"
] |
07c4bdb4cb8a126b351ef2f42e8106b9806b2384301de5fb01cf5eb2291493cb
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import numpy as np
import numpy.testing as npt
import espressomd
from espressomd import electrostatics
class pressureViaVolumeScaling:
    """Estimate the isotropic pressure from the potential-energy change
    caused by a small trial volume compression, following "Efficient
    pressure estimation in molecular simulations without evaluating
    the virial" (Harismiadis et al.).
    """

    def __init__(self, system, kbT):
        self.system = system
        self.kbT = kbT
        # Remember the original geometry so it can be restored after
        # every trial compression.
        self.old_box_lengths = np.copy(system.box_l)
        self.old_volume = np.prod(self.old_box_lengths)
        # Shrink the box by 0.1% of its volume for each trial move.
        self.dV = -0.001 * self.old_volume
        self.new_volume = self.old_volume + self.dV
        self.new_box_l = self.new_volume ** (1. / 3.)
        self.list_of_previous_values = []

    def measure_pressure_via_volume_scaling(self):
        """Perform one trial compression and record the resulting
        free-energy-perturbation sample. Only isotropic volume changes
        are supported.
        """
        def potential_energy():
            energies = self.system.analysis.energy()
            return energies["total"] - energies["kinetic"]

        Epot_old = potential_energy()
        self.system.change_volume_and_rescale_particles(self.new_box_l, "xyz")
        self.system.integrator.run(0)
        Epot_new = potential_energy()
        # Restore the original box before the next sampling step.
        self.system.change_volume_and_rescale_particles(
            self.old_box_lengths[0], "xyz")
        self.system.integrator.run(0)
        DeltaEpot = Epot_new - Epot_old
        particle_number = len(self.system.part[:].id)
        sample = (self.new_volume / self.old_volume) ** particle_number * \
            np.exp(-DeltaEpot / self.kbT)
        self.list_of_previous_values.append(sample)

    def get_result(self):
        """Return the pressure estimate from all samples so far."""
        mean_sample = np.mean(self.list_of_previous_values)
        return self.kbT / self.dV * np.log(mean_sample)
@utx.skipIfMissingFeatures(["P3M", "LENNARD_JONES"])
class VirialPressureConsistency(ut.TestCase):
    """Test the consistency of the core implementation of the virial pressure
    with an analytical relation which allows for the calculation of the
    pressure as a volume derivative of a function of the potential energy
    change on infinitesimal volume changes.

    The relation and its derivation can be found in the paper with the name
    "Efficient pressure estimation in molecular simulations without
    evaluating the virial" by Harismiadis, V. I., J. Vorholz, and A. Z.
    Panagiotopoulos. 1996"""
    # Handle to espresso system
    system = espressomd.System(box_l=[50, 50, 50])

    def setUp(self):
        # Fixed seed so the particle configuration is reproducible.
        np.random.seed(seed=1)
        self.system.seed = range(
            self.system.cell_system.get_state()["n_nodes"])
        self.system.time_step = 0.01
        self.kT = 0.5
        # Purely repulsive (WCA-like) Lennard-Jones interaction.
        self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
            epsilon=1.0, sigma=1.0, cutoff=2**(1.0 / 6.0), shift="auto")
        num_part = 40
        mass = 1
        # Add charge-neutral pairs (+1/-1) with Maxwell-distributed speeds.
        for i in range(num_part):
            self.system.part.add(
                pos=np.random.random(3) * self.system.box_l, q=1,
                v=np.sqrt(self.kT / mass) * np.random.normal(loc=[0, 0, 0]))
            self.system.part.add(
                pos=np.random.random(3) * self.system.box_l, q=-1,
                v=np.sqrt(self.kT / mass) * np.random.normal(loc=[0, 0, 0]))
        #############################################################
        #      Warmup Integration                                   #
        #############################################################
        self.system.integrator.set_steepest_descent(
            f_max=0,
            gamma=0.001,
            max_displacement=0.01)
        # warmup: minimize until the energy per particle is reasonable
        while self.system.analysis.energy()["total"] > 10 * num_part:
            print("minimization: {:.1f}".format(
                self.system.analysis.energy()["total"]))
            self.system.integrator.run(10)
        self.system.integrator.set_vv()
        self.system.thermostat.set_langevin(kT=self.kT, gamma=1.0, seed=41)

    def test_p3m_pressure(self):
        # Compare the virial pressure against the volume-scaling estimator.
        pressures_via_virial = []
        pressures_via_volume_scaling = []
        p3m = electrostatics.P3M(
            prefactor=2.0,
            accuracy=1e-3,
            mesh=16,
            cao=6,
            r_cut=1.4941e-01 * self.system.box_l[0])
        self.system.actors.add(p3m)
        print("Tune skin: {}".format(self.system.cell_system.tune_skin(
            min_skin=0.0, max_skin=2.5, tol=0.05, int_steps=100)))
        num_samples = 100
        pressure_via_volume_scaling = pressureViaVolumeScaling(
            self.system, self.kT)
        for i in range(num_samples):
            self.system.integrator.run(100)
            pressures_via_virial.append(
                self.system.analysis.pressure()['total'])
            pressure_via_volume_scaling.measure_pressure_via_volume_scaling()
        pressure_virial = np.mean(pressures_via_virial)
        # deviation should be below 5%
        abs_deviation_in_percent = 100 * abs(
            pressure_virial / pressure_via_volume_scaling.get_result() - 1.0)
        npt.assert_array_less(abs_deviation_in_percent, 5.0)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    ut.main()
|
mkuron/espresso
|
testsuite/python/p3m_electrostatic_pressure.py
|
Python
|
gpl-3.0
| 5,881
|
[
"ESPResSo"
] |
f2037846325848278776233dffea9782c6795c66d0a83b0398eae29a98deb2b6
|
# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Stan models, used as a source of ground truth."""
from inference_gym.internal import data
from inference_gym.tools.stan import brownian_motion
from inference_gym.tools.stan import eight_schools as eight_schools_lib
from inference_gym.tools.stan import item_response_theory
from inference_gym.tools.stan import log_gaussian_cox_process
from inference_gym.tools.stan import logistic_regression
from inference_gym.tools.stan import lorenz_system
from inference_gym.tools.stan import probit_regression
from inference_gym.tools.stan import radon_contextual_effects
from inference_gym.tools.stan import radon_contextual_effects_halfnormal
from inference_gym.tools.stan import sparse_logistic_regression
from inference_gym.tools.stan import stochastic_volatility
# Public API: one factory function per ground-truth Stan model.
__all__ = [
    'brownian_motion_missing_middle_observations',
    'brownian_motion_unknown_scales_missing_middle_observations',
    'convection_lorenz_bridge',
    'convection_lorenz_bridge_unknown_scales',
    'eight_schools',
    'german_credit_numeric_logistic_regression',
    'german_credit_numeric_probit_regression',
    'german_credit_numeric_sparse_logistic_regression',
    'radon_contextual_effects_indiana',
    'radon_contextual_effects_indiana_halfnormal',
    'radon_contextual_effects_minnesota',
    'radon_contextual_effects_minnesota_halfnormal',
    'stochastic_volatility_log_sp500',
    'stochastic_volatility_log_sp500_small',
    'stochastic_volatility_sp500',
    'stochastic_volatility_sp500_small',
    'synthetic_item_response_theory',
    'synthetic_log_gaussian_cox_process',
]
def brownian_motion_missing_middle_observations():
  """Brownian Motion with missing observations.

  Returns:
    target: StanModel.
  """
  dataset = data.brownian_motion_missing_middle_observations()
  return brownian_motion.brownian_motion(**dataset)


def brownian_motion_unknown_scales_missing_middle_observations():
  """Brownian Motion with missing observations and unknown scale parameters.

  Returns:
    target: StanModel.
  """
  dataset = data.brownian_motion_missing_middle_observations()
  # Only the observed locations are passed; the scales are inferred.
  return brownian_motion.brownian_motion_unknown_scales(
      observed_locs=dataset['observed_locs'])
def convection_lorenz_bridge():
  """Lorenz System with observed convection and missing observations.

  Returns:
    target: StanModel.
  """
  dataset = data.convection_lorenz_bridge()
  return lorenz_system.partially_observed_lorenz_system(**dataset)


def convection_lorenz_bridge_unknown_scales():
  """Lorenz System with observed convection and missing observations.

  Returns:
    target: StanModel.
  """
  dataset = data.convection_lorenz_bridge()
  # The *_unknown_scales model infers these, so drop the known values.
  del dataset['innovation_scale']
  del dataset['observation_scale']
  return lorenz_system.partially_observed_lorenz_system_unknown_scales(
      **dataset)


def eight_schools():
  """Eight schools hierarchical regression model."""
  return eight_schools_lib.eight_schools()
def german_credit_numeric_logistic_regression():
  """German credit (numeric) logistic regression.

  Returns:
    target: StanModel.
  """
  # The Stan model is fit on the training split only.
  dataset = data.german_credit_numeric()
  dataset.pop('test_features')
  dataset.pop('test_labels')
  return logistic_regression.logistic_regression(**dataset)


def german_credit_numeric_probit_regression():
  """German credit (numeric) probit regression.

  Returns:
    target: StanModel.
  """
  # The Stan model is fit on the training split only.
  dataset = data.german_credit_numeric()
  dataset.pop('test_features')
  dataset.pop('test_labels')
  return probit_regression.probit_regression(**dataset)


def german_credit_numeric_sparse_logistic_regression():
  """German credit (numeric) logistic regression with a sparsity-inducing prior.

  Returns:
    target: StanModel.
  """
  # The Stan model is fit on the training split only.
  dataset = data.german_credit_numeric()
  dataset.pop('test_features')
  dataset.pop('test_labels')
  return sparse_logistic_regression.sparse_logistic_regression(**dataset)
def radon_contextual_effects_indiana():
  """Hierarchical radon model with contextual effects, with data from Indiana.

  Returns:
    target: StanModel.
  """
  # Keep only the training fields; 'test_*' entries are for held-out eval.
  dataset = {k: v for k, v in data.radon_indiana().items()
             if not k.startswith('test_')}
  return radon_contextual_effects.radon_contextual_effects(**dataset)


def radon_contextual_effects_indiana_halfnormal():
  """Hierarchical radon model with contextual effects, with data from Indiana.

  Returns:
    target: StanModel.
  """
  # Keep only the training fields; 'test_*' entries are for held-out eval.
  dataset = {k: v for k, v in data.radon_indiana().items()
             if not k.startswith('test_')}
  return radon_contextual_effects_halfnormal.radon_contextual_effects(**dataset)


def radon_contextual_effects_minnesota():
  """Hierarchical radon model with contextual effects, with data from Minnesota.

  Returns:
    target: StanModel.
  """
  # Keep only the training fields; 'test_*' entries are for held-out eval.
  dataset = {k: v for k, v in data.radon_minnesota().items()
             if not k.startswith('test_')}
  return radon_contextual_effects.radon_contextual_effects(**dataset)


def radon_contextual_effects_minnesota_halfnormal():
  """Hierarchical radon model with contextual effects, with data from Minnesota.

  Returns:
    target: StanModel.
  """
  # Keep only the training fields; 'test_*' entries are for held-out eval.
  dataset = {k: v for k, v in data.radon_minnesota().items()
             if not k.startswith('test_')}
  return radon_contextual_effects_halfnormal.radon_contextual_effects(**dataset)
def stochastic_volatility_sp500():
  """Stochastic volatility model.

  This uses a dataset of 2517 daily returns of the S&P 500 index,
  representing the time period 6/25/2010-6/24/2020.

  Returns:
    target: StanModel.
  """
  dataset = data.sp500_returns()
  return stochastic_volatility.stochastic_volatility(**dataset)


def stochastic_volatility_sp500_small():
  """Stochastic volatility model.

  This is a smaller version of `stochastic_volatility_model_sp500` using only
  100 days of returns from the S&P 500, ending 6/24/2020.

  Returns:
    target: StanModel.
  """
  dataset = data.sp500_returns(num_points=100)
  return stochastic_volatility.stochastic_volatility(**dataset)


def stochastic_volatility_log_sp500():
  """Stochastic volatility model.

  This uses a dataset of 2517 daily log returns of the S&P 500 index,
  representing the time period 6/25/2010-6/24/2020.

  Returns:
    target: StanModel.
  """
  dataset = data.sp500_log_returns()
  return stochastic_volatility.stochastic_volatility(**dataset)


def stochastic_volatility_log_sp500_small():
  """Stochastic volatility model.

  This is a smaller version of `stochastic_volatility_model_log_sp500` using
  only 100 days of log returns from the S&P 500, ending 6/24/2020.

  Returns:
    target: StanModel.
  """
  dataset = data.sp500_log_returns(num_points=100)
  return stochastic_volatility.stochastic_volatility(**dataset)
def synthetic_item_response_theory():
  """One-parameter logistic item-response theory (IRT) model.

  This uses a dataset sampled from the prior. This dataset is a simulation of
  400 students each answering a subset of 100 unique questions, with a total of
  30012 questions answered.

  Returns:
    target: StanModel.
  """
  dataset = data.synthetic_item_response_theory()
  # The Stan model is fit on the training split only.
  del dataset['test_student_ids']
  del dataset['test_question_ids']
  del dataset['test_correct']
  return item_response_theory.item_response_theory(**dataset)


def synthetic_log_gaussian_cox_process():
  """Log-Gaussian Cox Process model.

  This dataset was simulated by constructing a 10 by 10 grid of equidistant 2D
  locations with spacing = 1, and then sampling from the prior to determine the
  counts at those locations.

  Returns:
    target: StanModel.
  """
  dataset = data.synthetic_log_gaussian_cox_process()
  return log_gaussian_cox_process.log_gaussian_cox_process(**dataset)
|
tensorflow/probability
|
spinoffs/inference_gym/inference_gym/tools/stan/targets.py
|
Python
|
apache-2.0
| 8,434
|
[
"Gaussian"
] |
4553a26089b461cbb6174cc7d3035b9004904519df20149659476732fa57e11c
|
#!/usr/bin/env python3
#
# Copyright (c) 2012 Brian Yi ZHANG <brianlions at gmail dot com>
#
# This file is part of pynebula.
#
# pynebula is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynebula is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynebula. If not, see <http://www.gnu.org/licenses/>.
#
import errno
import os
import select
import time
from .. import debug_info as _debug_info
from .. import log as _log
from . import _error
class _SelectApiWrapper(object):
    """Adapts select.select() to a poll()-style register/poll interface.

    Event constants mirror poll(): SELECT_IN (readable), SELECT_OUT
    (writable), SELECT_ERR (exceptional condition).
    """

    SELECT_IN, SELECT_OUT, SELECT_ERR = 0x01, 0x02, 0x04

    def __init__(self):
        # fd -> bitmask of events monitored on that fd
        self._monitored = {}

    def register(self, fd, eventmask = (SELECT_IN | SELECT_OUT | SELECT_ERR)):
        '''Register a file descriptor.

        Registering a file descriptor that's already registered is NOT an
        error (the previous eventmask is simply replaced).

        Args:
            fd: (int) file descriptor
            eventmask: (int) optional bitmask of events being waited for
        '''
        self._monitored[fd] = eventmask

    def unregister(self, fd):
        '''Remove a file descriptor being tracked.

        Raises:
            KeyError if fd was never registered.
        '''
        del self._monitored[fd]

    def modify(self, fd, eventmask):
        '''Modifies an already registered fd.

        Raises:
            IOError with errno set to ENOENT, if the fd was never registered.
        '''
        if fd not in self._monitored:
            raise IOError(errno.ENOENT)
        self._monitored[fd] = eventmask

    def poll(self, timeout = None):
        '''Polls the set of registered file descriptors for events.

        Args:
            timeout: timeout in seconds (as float), 0 to return immediately,
                     negative or None to block until at least one event was
                     fired.

        Returns:
            A possibly empty list containing (fd, event) 2-tuples for the
            descriptors that have events or errors to report.
        '''
        want_read = [fd for fd, mask in self._monitored.items()
                     if mask & self.SELECT_IN]
        want_write = [fd for fd, mask in self._monitored.items()
                      if mask & self.SELECT_OUT]
        want_error = [fd for fd, mask in self._monitored.items()
                      if mask & self.SELECT_ERR]
        # select.select() rejects negative timeouts; treat them as
        # "block forever", matching poll() semantics.
        if (timeout is not None) and timeout < 0:
            timeout = None
        ready_r, ready_w, ready_e = select.select(
            want_read, want_write, want_error, timeout)
        fired = {}
        for fd in ready_r:
            fired[fd] = fired.get(fd, 0) | self.SELECT_IN
        for fd in ready_w:
            fired[fd] = fired.get(fd, 0) | self.SELECT_OUT
        for fd in ready_e:
            fired[fd] = fired.get(fd, 0) | self.SELECT_ERR
        return [(fd, events) for (fd, events) in fired.items()]
#------------------------------------------------------------------------------
class AsyncEvent(_log.WrappedLogger):
'''Asynchronous events handling, based on Python builtin module `select'.'''
API_DEFAULT, API_EPOLL, API_POLL, API_SELECT = 0, 1, 2, 3
__api_names = {API_EPOLL: "epoll", API_POLL: "poll", API_SELECT: "select"}
    def __init__(self, raise_exceptions = True, api = API_DEFAULT,
                 log_handle = None):
        '''Asynchronous event loop.

        Args:
            raise_exceptions: If value of this argument is True, then attempting
                              to register() an already registered dispatcher, or
                              to unregister() a not registered dispatcher, will
                              raise an IOError exception.
            api:              Specifies the event API to be used, valid values
                              are API_DEFAULT, API_EPOLL, API_POLL, API_SELECT.
                              Note that this argument is just a hint, if the
                              specified API is not supported by the operating
                              system, this class will automatically choose an
                              API available.
            log_handle:       A log handle to be used, None to disable logging.
        '''
        _log.WrappedLogger.__init__(self, log_handle = log_handle)
        # poll object for I/O events: pick the best API that is both
        # requested (api is an upper bound) and available on this OS
        if api <= self.API_EPOLL and hasattr(select, 'epoll'):
            self.__event_api_init(self.API_EPOLL)
        elif api <= self.API_POLL and hasattr(select, 'poll'):
            self.__event_api_init(self.API_POLL)
        elif api <= self.API_SELECT and hasattr(select, 'select'):
            self.__event_api_init(self.API_SELECT)
        else:
            raise ValueError("API {:d} is not supported".format(api))
        #=======================================================================
        # NOTE:
        #     This feature is NOT implemented yet!!!
        #
        # Tips:
        #     Use os.read() & os.write().
        #=======================================================================
        # pipe used by set_stop_flag() to make epoll.poll() return
        self._pipe_rd_end, self._pipe_wr_end = os.pipe()
        self.__set_nonblock_flag(self._pipe_rd_end)
        self.__set_nonblock_flag(self._pipe_wr_end)
        self.log_debug("AsyncEvent initialized, api {:s}{:s}, pipe (r {:d}, w {:d})".format(
            self.event_api_name(),
            self.event_api() == self.API_EPOLL and ", epoll_fd {:d}".format(self._pollster.fileno()) or "",
            self._pipe_rd_end, self._pipe_wr_end))
        # --- file events related ---
        # mapping from fd to dispatcher object
        self._registered_dispatchers = {}
        # mapping from fd to events monitored
        self._monitored_events = {}
        # list of monitored fds with timeout, item is 2-tuple of (timeout, fd)
        self._fds_with_timeout = []
        # --- time events related ---
        self._time_events = []
        # raise an exception in case of error
        self._raise_exceptions = raise_exceptions
        self._stop_flag = False
    def __event_api_init(self, api):
        '''Create the pollster object and cache per-API event bit masks.
        Sets self._pollster plus the _event_*_mask attributes used by the
        rest of this class to translate between APIs.
        Raises:
            ValueError: if `api' is not one of API_EPOLL/API_POLL/API_SELECT.
        '''
        if api == self.API_EPOLL:
            self._pollster = select.epoll()
            self._event_api = self.API_EPOLL
            self._event_in_mask = select.EPOLLIN
            self._event_pri_mask = select.EPOLLPRI
            self._event_out_mask = select.EPOLLOUT
            self._event_hup_mask = select.EPOLLHUP
            self._event_err_mask = select.EPOLLERR
        elif api == self.API_POLL:
            self._pollster = select.poll()
            self._event_api = self.API_POLL
            self._event_in_mask = select.POLLIN
            self._event_pri_mask = select.POLLPRI
            self._event_out_mask = select.POLLOUT
            self._event_hup_mask = select.POLLHUP
            self._event_err_mask = select.POLLERR
        elif api == self.API_SELECT:
            self._pollster = _SelectApiWrapper()
            self._event_api = self.API_SELECT
            # select() has no distinct PRI/HUP notions: PRI aliases IN and
            # HUP aliases ERR, which is why other methods compare the masks
            # before reporting these events separately.
            self._event_in_mask = _SelectApiWrapper.SELECT_IN
            self._event_pri_mask = _SelectApiWrapper.SELECT_IN
            self._event_out_mask = _SelectApiWrapper.SELECT_OUT
            self._event_hup_mask = _SelectApiWrapper.SELECT_ERR
            self._event_err_mask = _SelectApiWrapper.SELECT_ERR
        else:
            raise ValueError("API {:d} is not supported".format(api))
    def event_api_name(self):
        "String representation of the event API used."
        # __api_names is presumably a class-level mapping from API constant
        # to name; it is defined outside this chunk -- confirm.
        return self.__api_names[self._event_api]
    def event_api(self):
        "Event API used (one of API_EPOLL, API_POLL, API_SELECT)."
        return self._event_api
    def set_stop_flag(self):
        '''Try to stop the event loop.
        Notes:
            This method may not work as expected, refer to method loop() for more
            information.
        '''
        # loop() only checks this flag between iterations, so a loop blocked
        # inside poll() will not notice the flag until poll() returns.
        self._stop_flag = True
    def get_stop_flag(self):
        '''Check if the stop flag was set (see set_stop_flag()).'''
        return self._stop_flag
    def num_of_dispatchers(self):
        '''Returns number of Dispatcher objects being monitored.'''
        return len(self._registered_dispatchers)
    def num_of_scheduled_jobs(self):
        '''Returns number of ScheduledJob scheduled.'''
        return len(self._time_events)
    def __str__(self):
        # %-style formatting is used deliberately here: it stringifies
        # arbitrary objects (bools, ints) without the TypeError that the
        # "{:s}" format spec raises for non-str arguments.
        return "<%s.%s at %s {api: %s%s, pipe_rd:%d, pipe_wr:%d, dispatchers:%d, jobs:%d, stop_flag:%d}>" % \
            (self.__class__.__module__, self.__class__.__name__, hex(id(self)),
            self.event_api_name(),
            self.event_api() == self.API_EPOLL and ", epoll_fd:{:d}".format(self._pollster.fileno()) or "",
            self._pipe_rd_end, self._pipe_wr_end, self.num_of_dispatchers(),
            self.num_of_scheduled_jobs(), self._stop_flag,)
    def register(self, disp_obj):
        '''Register a dispatcher object.
        Returns:
            If registered successfully, return True. If failed, depends on the
            value of raise_exceptions passed to __init__(), may either return
            False or raise an exception.
        Raises:
            TypeError: if the supplied object is not an instance of Dispatcher.
            IOError: with errno EEXIST if the dispatcher was already registered.
        '''
        if not isinstance(disp_obj, Dispatcher):
            if self._raise_exceptions:
                raise TypeError('disp_obj {:s} is not an instance of Dispatcher'.format(repr(disp_obj)))
            else:
                return False
        file_number = disp_obj.fileno()
        if file_number not in self._registered_dispatchers:
            disp_obj.attach_to_pollster(self)
            # Build the event mask from the dispatcher's monitor_* predicates.
            flags = 0
            flag_names = []
            if disp_obj.monitor_readable():
                flags |= self._event_in_mask
                flag_names.append("IN")
                # Only request PRI separately when the API distinguishes it
                # from IN (select() does not, see __event_api_init()).
                if self._event_in_mask != self._event_pri_mask:
                    flags |= self._event_pri_mask
                    flag_names.append("PRI")
            if disp_obj.monitor_writable():
                flags |= self._event_out_mask
                flag_names.append("OUT")
            timeout = disp_obj.monitor_timeout()
            self._pollster.register(file_number, flags)
            self.log_debug("monitored fd {:d}, flags ({:s})".format(file_number,
                " ".join(flag_names)))
            self._registered_dispatchers[file_number] = disp_obj
            # NOTE: a new entry is always created, no matter flag is 0 or not!
            self._monitored_events[file_number] = flags
            if timeout:
                self._fds_with_timeout.append((timeout, file_number))
                self.log_debug("fd {:d}, timeout event at {:s}".format(
                    file_number, _log.Logger.timestamp_str(timeout)))
            else:
                self.log_debug("fd {:d}, no timeout event".format(file_number))
            return True
        elif not self._raise_exceptions:
            return False
        else:
            raise IOError(errno.EEXIST,
                "fd {:d} was already registered".format(disp_obj.fileno()))
    def unregister(self, disp_obj):
        '''Unregister a dispatcher object.
        Returns:
            If unregistered successfully, return True. If failed, depends on the
            value of raise_exceptions passed to __init__(), may either return
            False or raise an exception.
        Raises:
            TypeError: if the supplied object is not an instance of Dispatcher.
            IOError: with errno ENOENT if the dispatcher is not registered.
        '''
        if not isinstance(disp_obj, Dispatcher):
            if self._raise_exceptions:
                raise TypeError('disp_obj {:s} is not an instance of Dispatcher'.format(repr(disp_obj)))
            else:
                return False
        file_number = disp_obj.fileno()
        if file_number in self._registered_dispatchers:
            disp_obj.detach_from_pollster(self)
            # Remove every piece of bookkeeping created by register().
            del self._registered_dispatchers[file_number]
            del self._monitored_events[file_number]
            self.__remove_timeout_fd(file_number)
            self._pollster.unregister(file_number)
            return True
        elif not self._raise_exceptions:
            return False
        else:
            raise IOError(errno.ENOENT,
                "fd {:d} is not registered".format(disp_obj.fileno()))
    def add_scheduled_job(self, job_obj):
        '''Schedule to execute the job in the future.
        The job's own schedule() method supplies the absolute start time.
        Returns:
            True if the job was scheduled (its schedule() returned a truthy
            time), False otherwise.  (Note: despite older docs, the absolute
            time itself is NOT returned.)
        '''
        timeout = job_obj.schedule()
        if timeout:
            self._time_events.append((timeout, job_obj))
            return True
        else:
            return False
def __set_nonblock_flag(self, fd):
'''Set O_NONBLOCK flag for the supplied file descriptor.
Returns:
True if set, False otherwise.
Notes:
Not supported on Windows.
'''
try:
import fcntl
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
if flags < 0:
return False
flags = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
if flags:
return False
return True
except IOError as err:
self.log_notice("fcntl() failed setting O_NONBLOCK on fd {:d}, exception {:s}".format(fd, err))
return False
except ImportError:
return False
    def __process_fired_events(self, fd, flags):
        '''Dispatch the poll events in `flags' to the handlers of fd's dispatcher.
        Handler order is fixed: PRI, IN, OUT, then HUP/ERR (which closes).
        Any exception from a handler (other than exit-type exceptions) is
        routed to the dispatcher's handle_error().
        '''
        disp_obj = self._registered_dispatchers[fd]
        try:
            # We need to check if fd is in self._registered_dispatchers, 'cause
            # those `handle_*' methods might remove `disp_obj' from the
            # AsyncEvent instance (viz. self) used here.
            # 1st: PRI event
            if (self._event_pri_mask != self._event_in_mask) \
               and (flags & self._event_pri_mask) \
               and (fd in self._registered_dispatchers):
                disp_obj.handle_expt_event()
            # 2nd: IN event
            if (flags & self._event_in_mask) \
               and (fd in self._registered_dispatchers):
                disp_obj.handle_read_event()
            # 3rd: OUT event
            if (flags & self._event_out_mask) \
               and (fd in self._registered_dispatchers):
                disp_obj.handle_write_event()
            # 4th: HUP and ERR event
            if (flags & (self._event_hup_mask | self._event_err_mask)) \
               and (fd in self._registered_dispatchers):
                disp_obj.handle_close()
        except (_error.AeExitNow, KeyboardInterrupt, SystemExit):
            # Exit-type exceptions must propagate to stop the loop.
            raise
        except Exception as e:
            disp_obj.handle_error(e)
def __loop_step(self):
nearest_timeout = -1
now = time.time()
if len(self._fds_with_timeout):
self.__sort_timeout_fds()
nt = self._fds_with_timeout[0][0] - now
if nt <= 0: # already timed out
nearest_timeout = 0
else: # not timed out yet
if (nearest_timeout < 0) or (nt < nearest_timeout):
nearest_timeout = nt
if len(self._time_events):
self.__sort_time_events()
nt = self._time_events[0][0] - now
if nt <= 0: # already timed out
nearest_timeout = 0
else: # not timed out yet
if (nearest_timeout < 0) or (nt < nearest_timeout):
nearest_timeout = nt
try:
# NOTE:
# select.poll() requires the timeout be specified in milliseconds,
# but select.epoll() and select.select() require it specified in
# seconds (as float).
if nearest_timeout \
and (self.event_api() == self.API_POLL) \
and (nearest_timeout > 0):
nearest_timeout = int(nearest_timeout * 1000)
result = self._pollster.poll(nearest_timeout)
except (select.error, IOError) as err:
if err.args[0] != errno.EINTR:
raise
result = []
if len(result):
for fd, flags in result:
if self.get_log_handle():
flag_names = []
if flags & self._event_in_mask:
flag_names.append("IN")
if flags & self._event_out_mask:
flag_names.append("OUT")
if (flags & self._event_pri_mask) \
and (self._event_pri_mask != self._event_in_mask):
flag_names.append("PRI")
if (flags & self._event_hup_mask) \
and (self._event_hup_mask != self._event_err_mask):
flag_names.append("HUP")
if flags & self._event_err_mask:
flag_names.append("ERR")
self.log_debug("events fired, fd {:d}, flags ({:s})".format(
fd, " ".join(flag_names)))
self.__process_fired_events(fd, flags)
self.__update_associated_events(fd)
else:
now = time.time()
# handle timeout events of file descriptors
for (timeout, fd) in self._fds_with_timeout:
if timeout > now:
break
del self._fds_with_timeout[0]
self._registered_dispatchers[fd].handle_timeout_event(self)
self.__update_associated_events(fd)
# handle scheduled jobs
# TODO: it seems strange to deleting items from iterable while iterating
for (timeout, job_obj) in self._time_events:
if timeout > now:
break
del self._time_events[0]
job_obj.handle_job_event()
new_timeout = job_obj.schedule()
if new_timeout:
self._time_events.append((new_timeout, job_obj))
def __sort_timeout_fds(self):
self._fds_with_timeout = sorted(self._fds_with_timeout,
key = lambda item: item[0])
def __sort_time_events(self):
self._time_events = sorted(self._time_events,
key = lambda item: item[0])
def __remove_timeout_fd(self, fd):
for (idx, (unused_timeout, a_fd)) in enumerate(self._fds_with_timeout):
if fd == a_fd:
del self._fds_with_timeout[idx]
# assume there's no duplicated fd in self._fds_with_timeout[]
break
    def __update_associated_events(self, fd):
        '''Refresh pollster registration and timeout bookkeeping for `fd'.
        Re-queries the dispatcher's monitor_* predicates and, when the event
        mask changed, updates the pollster.  No-op if `fd' is no longer
        registered (a handler may have unregistered it).
        '''
        if fd not in self._registered_dispatchers:
            return
        # Timeout entry is always rebuilt from scratch below.
        self.__remove_timeout_fd(fd)
        disp_obj = self._registered_dispatchers[fd]
        flags = 0
        flag_names = []
        if disp_obj.monitor_readable():
            flags |= self._event_in_mask
            flag_names.append("IN")
            # PRI is only distinct from IN for epoll/poll, not select().
            if self._event_in_mask != self._event_pri_mask:
                flags |= self._event_pri_mask
                flag_names.append("PRI")
        if disp_obj.monitor_writable():
            flags |= self._event_out_mask
            flag_names.append("OUT")
        if self._monitored_events[fd] != flags:
            self.log_debug("modifying fd {:d}, flags {:d} -> {:d} ({:s})".format(
                fd, self._monitored_events[fd], flags, " ".join(flag_names)))
            self._pollster.modify(fd, flags)
            self._monitored_events[fd] = flags
        else:
            self.log_debug("monitored fd {:d}, flags ({:s})".format(fd,
                " ".join(flag_names)))
        timeout = disp_obj.monitor_timeout()
        if timeout:
            self._fds_with_timeout.append((timeout, fd))
            self.log_debug("fd {:d}, timeout event at {:s}".format(fd,
                _log.Logger.timestamp_str(timeout)))
        else:
            self.log_debug("fd {:d}, no timeout event".format(fd))
def loop(self):
'''Starts the event loop
Event loop will terminate if no Dispatcher or ScheduledJob is available.
NOTES (or TODOs):
Sometimes this method might not terminate as you expected, because we
might be blocked in the call of poll().
'''
self.log_notice("starting {:s}".format(self))
while (not self.get_stop_flag()) \
and (self.num_of_dispatchers() or self.num_of_scheduled_jobs()):
self.__loop_step()
self.log_notice("finishing {:s}".format(self))
#------------------------------------------------------------------------------
class Dispatcher(_log.WrappedLogger):
    '''Wrapper around lower level file (or socket) descriptor object.
    This class turns a file (or socket) descriptor into a non-blocking object,
    and when certain low level events fired, the asynchronous loop will detect
    it and calls corresponding handler methods to handle it.
    '''
    def __init__(self, log_handle = None):
        _log.WrappedLogger.__init__(self, log_handle)
        # AsyncEvent instance we are registered with; None while detached.
        self.__pollster = None
    def attach_to_pollster(self, pollster):
        '''Associate this dispatcher with an AsyncEvent instance.
        Raises:
            TypeError: if `pollster' is not an AsyncEvent.
            _error.AeAlreadyAttachedError: if already attached.
        '''
        if not isinstance(pollster, AsyncEvent):
            raise TypeError("{:s}: not instance of AsyncEvent".format(repr(pollster)))
        if self.__pollster:
            raise _error.AeAlreadyAttachedError
        self.__pollster = pollster
    def detach_from_pollster(self, pollster):
        '''Dissociate this dispatcher from an AsyncEvent instance.
        Raises:
            TypeError: if `pollster' is not an AsyncEvent.
            _error.AeNotAttachedError: if not currently attached.
        '''
        if not isinstance(pollster, AsyncEvent):
            raise TypeError("{:s}: not instance of AsyncEvent".format(repr(pollster)))
        if not self.__pollster:
            raise _error.AeNotAttachedError
        self.__pollster = None
    def pollster(self, raise_exception = True):
        '''Returns the attached AsyncEvent instance (None when detached).
        Raises:
            _error.AeNotAttachedError: if not attached and `raise_exception'
                is true.
        '''
        if not self.__pollster and raise_exception:
            raise _error.AeNotAttachedError
        return self.__pollster
    # 1. helper methods, implement these methods in derived classes
    def fileno(self):
        '''Returns file descriptor of the open file (or socket).
        NOTES:
            Subclass must override this method.
        '''
        raise NotImplementedError("{:s}.{:s}: fileno() not implemented".format(
            self.__class__.__module__, self.__class__.__name__))
    def close(self):
        '''Closes the underlying file descriptor (or socket).
        NOTES:
            Subclass must override this method.
        '''
        raise NotImplementedError("{:s}.{:s}: close() not implemented".format(
            self.__class__.__module__, self.__class__.__name__))
    # 2. predicate for AsyncEvent, implement these methods in derived classes
    def readable(self):
        '''Determine whether read event on the underlying fd should be waited.
        At the beginning of each round of the asynchronous loop, this method
        will be called.
        '''
        self.log_notice("{:s}.{:s}: using default readable()".format(
            self.__class__.__module__, self.__class__.__name__))
        return True
    def writable(self):
        '''Determine whether write event on the underlying fd should be waited.
        At the beginning of each round of the asynchronous loop, this method
        will be called.
        '''
        self.log_notice("{:s}.{:s}: using default writable()".format(
            self.__class__.__module__, self.__class__.__name__))
        return True
    def timeout(self):
        '''Determine whether timeout event on the underlying fd should be waited.
        At the beginning of each round of the asynchronous loop, this method
        will be called.
        Returns:
            time in seconds (as float) since the Epoch, if interested in timeout
            event; either None or 0, if not interested in timeout event.
        '''
        self.log_notice("{:s}.{:s}: using default timeout()".format(
            self.__class__.__module__, self.__class__.__name__))
        return None
    # 3. methods used for handling of events, implement these methods in derived
    # classes
    def handle_read(self):
        '''Called when the underlying fd is readable.'''
        self.log_notice("{:s}.{:s}: using default handle_read()".format(
            self.__class__.__module__, self.__class__.__name__))
    def handle_write(self):
        '''Called when the underlying fd is writable.'''
        self.log_notice("{:s}.{:s}: using default handle_write()".format(
            self.__class__.__module__, self.__class__.__name__))
    def handle_timeout(self):
        '''Called when the underlying fd is timed out.'''
        self.log_notice("{:s}.{:s}: using default handle_timeout()".format(
            self.__class__.__module__, self.__class__.__name__))
    def handle_expt(self):
        '''Called when there's out of band (OOB) data for the underlying fd.'''
        self.log_notice("{:s}.{:s}: using default handle_expt()".format(
            self.__class__.__module__, self.__class__.__name__))
    def handle_error(self, exception_obj):
        '''Called when an exception was raised and not handled.
        This default version prints a traceback, then calls `handle_close()',
        in order to dissociate the underlying fd from the AsyncEvent object and
        closes it.
        NOTES:
            1. there's NO accompanying method `handle_error_event()';
            2. this method calls handle_close()!
            3. Subclass (e.g. D) may do necessary cleanup, and use the `super()'
               method to call this method:
               e.g.
               >>> class Derived(Dispatcher):
               >>>     def handle_error(self, exception_obj):
               >>>         ...
               >>>         # do something
               >>>         ...
               >>>         super(Derived, self).handle_error(exception_obj)
        '''
        # BUGFIX: use "{}" instead of "{:s}" for objects that may not be
        # plain str (traceback parts, the pollster object, or None) --
        # the "s" spec raises TypeError for such arguments.
        if exception_obj:
            unused_nil, exp_type, exp_value, exp_traceback = _debug_info.compact_traceback()
            self.log_notice('error, exception {} (type: {}, callstack: {}), fd {:d}, ae {}'.format(
                exp_value, exp_type, exp_traceback, self.fileno(), self.pollster(False)))
        else:
            self.log_notice('error, fd {:d}, ae {}'.format(self.fileno(), self.pollster(False)))
        self.handle_close()
    def handle_close(self):
        '''Called when the underlying fd was closed.
        NOTES:
            1. there's NO accompanying method `handle_close_event()';
            2. this method closes the underlying fd, after dissociating it
               from the attached AsyncEvent instance;
            3. Subclass (e.g. D) may do necessary cleanup, and use the `super()'
               method to call this method:
               e.g.
               >>> class Derived(Dispatcher):
               >>>     def handle_close(self):
               >>>         ...
               >>>         # do something
               >>>         ...
               >>>         super(Derived, self).handle_close()
        '''
        if self.pollster(False):
            # BUGFIX: "{}" instead of "{:s}" -- see handle_error().
            self.log_info("unregister, dispatcher {}, ae {}".format(self,
                self.pollster()))
            self.pollster().unregister(self)
        self.close()
    # 4. Following methods are called by AsyncEvent directly. These methods are
    # used when implementing higher level dispatcher classes, in order to do
    # more sophisticated preparation (e.g. asynchronous TCP connection, SOCKS
    # connection, etc.), before passing control to those user implemented
    # methods, e.g. handle_read(), handle_write(), readable(), writable() etc.
    def handle_read_event(self, call_user_func = True):
        if call_user_func:
            self.handle_read()
    def handle_write_event(self, call_user_func = True):
        if call_user_func:
            self.handle_write()
    def handle_timeout_event(self, call_user_func = True):
        if call_user_func:
            self.handle_timeout()
    def handle_expt_event(self, call_user_func = True):
        if call_user_func:
            self.handle_expt()
    def monitor_readable(self, call_user_func = True):
        if call_user_func:
            return self.readable()
        else:
            return True
    def monitor_writable(self, call_user_func = True):
        if call_user_func:
            return self.writable()
        else:
            return True
    def monitor_timeout(self, call_user_func = True):
        if call_user_func:
            return self.timeout()
        else:
            return None
# -----------------------------------------------------------------------------
class ScheduledJob(_log.WrappedLogger):
    '''Base class of scheduled job.
    Subclasses implement schedule() (when to run) and handle_job_event()
    (what to run); AsyncEvent drives both.
    '''
    def __init__(self, log_handle = None):
        _log.WrappedLogger.__init__(self, log_handle = log_handle)
        # Reserved for a future attach/detach API (see commented-out code).
        self.__pollster = None
#    def attach_to_pollster(self, pollster):
#        if not isinstance(pollster, AsyncEvent):
#            raise TypeError("{:s}: not instance of AsyncEvent".format(repr(pollster)))
#        if self.__pollster:
#            raise _error.AeAlreadyAttachedError
#        self.__pollster = pollster
#
#    def detach_from_pollster(self, pollster):
#        if not isinstance(pollster, AsyncEvent):
#            raise TypeError("{:s}: not instance of AsyncEvent".format(repr(pollster)))
#        if not self.__pollster:
#            raise _error.AeNotAttachedError
#        self.__pollster = None
#
#    def pollster(self, raise_exception = True):
#        if not self.__pollster and raise_exception:
#            raise _error.AeNotAttachedError
#        return self.__pollster
    # implement these two methods in derived classes
    def schedule(self):
        '''Determine whether we need to schedule this job in the future or not.
        Returns:
            Time in seconds (as float) since the Epoch; or None or 0, if this job
            no longer need to be scheduled in the future.
        '''
        # BUGFIX: message said "not implement" (typo).
        self.log_notice("{:s}.{:s}: schedule() not implemented".format(
            self.__class__.__module__, self.__class__.__name__))
        return None
    def handle_job_event(self):
        '''Called to handle the job event.'''
        # BUGFIX: message wrongly referred to handle_timeout().
        self.log_notice("{:s}.{:s}: using default handle_job_event()".format(
            self.__class__.__module__, self.__class__.__name__))
|
brianlions/python-nebula
|
nebula/asyncevent/_asyncevent.py
|
Python
|
gpl-3.0
| 30,921
|
[
"Brian"
] |
6b38e7e21ee2188c75b3dd5af4067793b99db42fb00e96a334fd5bfb13ac8cba
|
"""Code run on the client side for unstaging complete Pulsar jobs."""
import fnmatch
from contextlib import contextmanager
from logging import getLogger
from os.path import join, relpath
from json import loads
from ..action_mapper import FileActionMapper
from ..staging import COMMAND_VERSION_FILENAME
log = getLogger(__name__)
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
    """Process for "un-staging" a complete Pulsar job.

    This function is responsible for downloading results from remote
    server and cleaning up Pulsar staging directory (if needed.)
    """
    failure_exceptions = []
    if job_completed_normally:
        collector = ResultsCollector(
            ClientOutputCollector(client),
            FileActionMapper(client),
            client_outputs,
            pulsar_outputs,
        )
        failure_exceptions = collector.collect()
    # Cleanup policy also depends on whether any collection failed.
    _clean(failure_exceptions, cleanup_job, client)
    return failure_exceptions
class ClientOutputCollector:
    """Downloads individual outputs through the Pulsar client."""

    def __init__(self, client):
        self.client = client

    def collect_output(self, results_collector, output_type, action, name):
        """Fetch one output; return False when the Pulsar side handled it."""
        if not action.staging_action_local:
            # This output should have been handled by the Pulsar.
            return False
        self.client.fetch_output(
            path=action.path,
            name=name,
            working_directory=results_collector.client_outputs.working_directory,
            output_type=output_type,
            action_type=action.action_type
        )
        return True
class ResultsCollector:
    """Drives collection of every job output back to the client.

    Walks explicit outputs, the version file, and the working/metadata/job
    directory contents, delegating each download to ``output_collector``
    and recording download failures in an exception tracker.
    """

    def __init__(self, output_collector, action_mapper, client_outputs, pulsar_outputs):
        self.output_collector = output_collector
        self.action_mapper = action_mapper
        self.client_outputs = client_outputs
        self.pulsar_outputs = pulsar_outputs
        # Relative names already fetched, so later passes skip them.
        self.downloaded_working_directory_files = []
        self.exception_tracker = DownloadExceptionTracker()
        self.output_files = client_outputs.output_files
        self.working_directory_contents = pulsar_outputs.working_directory_contents or []
        self.metadata_directory_contents = pulsar_outputs.metadata_directory_contents or []
        self.job_directory_contents = pulsar_outputs.job_directory_contents or []

    def collect(self):
        """Run all collection passes; return list of download exceptions."""
        self.__collect_working_directory_outputs()
        self.__collect_outputs()
        self.__collect_version_file()
        self.__collect_other_working_directory_files()
        self.__collect_metadata_directory_files()
        self.__collect_job_directory_files()
        return self.exception_tracker.collection_failure_exceptions

    def __collect_working_directory_outputs(self):
        working_directory = self.client_outputs.working_directory
        # Fetch explicit working directory outputs.
        for source_file, output_file in self.client_outputs.work_dir_outputs:
            name = relpath(source_file, working_directory)
            if name not in self.working_directory_contents:
                # Could be a glob
                matching = fnmatch.filter(self.working_directory_contents, name)
                if matching:
                    name = matching[0]
                    source_file = join(working_directory, name)
            pulsar = self.pulsar_outputs.path_helper.remote_name(name)
            if self._attempt_collect_output('output_workdir', path=output_file, name=pulsar):
                self.downloaded_working_directory_files.append(pulsar)
            # Remove from full output_files list so don't try to download directly.
            try:
                self.output_files.remove(output_file)
            except ValueError:
                raise Exception("Failed to remove {} from {}".format(output_file, self.output_files))

    def __collect_outputs(self):
        # Legacy Pulsar not returning list of files, iterate over the list of
        # expected outputs for tool.
        for output_file in self.output_files:
            # Fetch output directly...
            output_generated = self.pulsar_outputs.has_output_file(output_file)
            if output_generated:
                self._attempt_collect_output('output', output_file)

            for galaxy_path, pulsar in self.pulsar_outputs.output_extras(output_file).items():
                self._attempt_collect_output('output', path=galaxy_path, name=pulsar)
            # else not output generated, do not attempt download.

    def __collect_version_file(self):
        version_file = self.client_outputs.version_file
        pulsar_output_directory_contents = self.pulsar_outputs.output_directory_contents
        if version_file and COMMAND_VERSION_FILENAME in pulsar_output_directory_contents:
            self._attempt_collect_output('output', version_file, name=COMMAND_VERSION_FILENAME)

    def __collect_other_working_directory_files(self):
        self.__collect_directory_files(
            self.client_outputs.working_directory,
            self.working_directory_contents,
            'output_workdir',
        )

    def __collect_metadata_directory_files(self):
        self.__collect_directory_files(
            self.client_outputs.metadata_directory,
            self.metadata_directory_contents,
            'output_metadata',
        )

    def __collect_job_directory_files(self):
        self.__collect_directory_files(
            self.client_outputs.job_directory,
            self.job_directory_contents,
            'output_jobdir',
        )

    def __realized_dynamic_file_source_references(self):
        """Collect every "filename" value mentioned in galaxy.json sources."""
        references = []

        def record_references(from_dict):
            # Walks nested dicts/lists collecting values under "filename" keys.
            if isinstance(from_dict, list):
                for v in from_dict:
                    record_references(v)
            elif isinstance(from_dict, dict):
                for k, v in from_dict.items():
                    if k == "filename":
                        references.append(v)
                    if isinstance(v, (list, dict)):
                        record_references(v)

        def parse_and_record_references(json_content):
            try:
                as_dict = loads(json_content)
                record_references(as_dict)
            except Exception as e:
                # Malformed content is logged and otherwise ignored.
                log.warning("problem parsing galaxy.json %s" % e)
                pass

        realized_dynamic_file_sources = (self.pulsar_outputs.realized_dynamic_file_sources or [])
        for realized_dynamic_file_source in realized_dynamic_file_sources:
            contents = realized_dynamic_file_source["contents"]
            source_type = realized_dynamic_file_source["type"]
            assert source_type in ["galaxy", "legacy_galaxy"], source_type
            if source_type == "galaxy":
                parse_and_record_references(contents)
            else:
                # Legacy format: one JSON document per line.
                for line in contents.splitlines():
                    parse_and_record_references(line)
        return references

    def __collect_directory_files(self, directory, contents, output_type):
        if directory is None:  # e.g. output_metadata_directory
            return
        dynamic_file_source_references = self.__realized_dynamic_file_source_references()
        # Fetch remaining working directory outputs of interest.
        for name in contents:
            collect = False
            if name in self.downloaded_working_directory_files:
                continue
            if self.client_outputs.dynamic_match(name):
                collect = True
            elif name in dynamic_file_source_references:
                collect = True

            if collect:
                log.debug("collecting dynamic {} file {}".format(output_type, name))
                output_file = join(directory, self.pulsar_outputs.path_helper.local_name(name))
                if self._attempt_collect_output(output_type=output_type, path=output_file, name=name):
                    self.downloaded_working_directory_files.append(name)

    def _attempt_collect_output(self, output_type, path, name=None):
        # path is final path on galaxy server (client)
        # name is the 'name' of the file on the Pulsar server (possible a relative)
        # path.
        collected = False
        # Exceptions raised inside the tracker context are recorded, not raised.
        with self.exception_tracker():
            action = self.action_mapper.action({"path": path}, output_type)
            if self._collect_output(output_type, action, name):
                collected = True

        return collected

    def _collect_output(self, output_type, action, name):
        log.info("collecting output {} with action {}".format(name, action))
        try:
            return self.output_collector.collect_output(self, output_type, action, name)
        except Exception as e:
            if _allow_collect_failure(output_type):
                log.warning(
                    "Allowed failure in postprocessing, will not force job failure but generally indicates a tool"
                    f" failure: {e}")
            else:
                raise
class DownloadExceptionTracker:
    """Records exceptions raised inside its context-manager calls."""

    def __init__(self):
        # Exceptions captured so far, in the order they occurred.
        self.collection_failure_exceptions = []

    @contextmanager
    def __call__(self):
        """Context manager that swallows and records any exception."""
        try:
            yield
        except Exception as exc:
            self.collection_failure_exceptions.append(exc)
def _clean(collection_failure_exceptions, cleanup_job, client):
    """Clean up the remote Pulsar job if the cleanup policy allows it.

    Cleanup runs when nothing failed and ``cleanup_job`` is not "never",
    or unconditionally when ``cleanup_job`` is "always".  Cleanup errors
    are logged, never raised.
    """
    failed = (len(collection_failure_exceptions) > 0)
    do_clean = (not failed and cleanup_job != "never") or cleanup_job == "always"
    if do_clean:
        message = "Cleaning up job (failed [%s], cleanup_job [%s])"
    else:
        message = "Skipping job cleanup (failed [%s], cleanup_job [%s])"
    # Lazy %-args: let logging do the formatting only if the record is emitted.
    log.debug(message, failed, cleanup_job)

    if do_clean:
        try:
            client.clean()
        except Exception:
            # BUGFIX: Logger.warn is deprecated; warning is the supported name.
            log.warning("Failed to cleanup remote Pulsar job")
def _allow_collect_failure(output_type):
return output_type in ['output_workdir']
__all__ = ('finish_job',)
|
galaxyproject/pulsar
|
pulsar/client/staging/down.py
|
Python
|
apache-2.0
| 10,068
|
[
"Galaxy"
] |
6d1733dc9e36bd5ef0902d27c4b1e782349271e992df6920da1021d734ce8c80
|
#region GPLv3 notice
# Copyright 2014 Damian Quiroga
#
# This file is part of depict.
#
# depict is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# depict is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with depict. If not, see <http://www.gnu.org/licenses/>.
#endregion
from __future__ import absolute_import
from depict import Depict
@when(u'I run depict on it')
def step_impl(context):
    """Start depict against the program under test and register cleanup.

    NOTE(review): the ``when`` decorator is not imported in this file;
    presumably behave injects it when loading step modules -- confirm.
    """
    context.depict = Depict()
    context.depict.run(context.program_path)
    context.cleanup_tasks.append(context.depict.stop)
@when(u'I open the app')
def step_impl(context):
    """Open the depict web UI by delegating to the generic "I visit" step."""
    context.execute_steps(u'When I visit "http://localhost:%s"' % context.depict.http_port)
@when(u'I search {fill_text}')
def step_impl(context, fill_text):
    """Focus the search widget and type the requested text into it."""
    click_on_search(context.browser)
    # NOTE(review): the leading "\b" is a literal backspace character --
    # presumably it clears a pre-filled character in the widget; confirm.
    type_in_search(context.browser, '\b%s' % fill_text)
def click_on_search(browser):
    """Click the selectize search input so it receives keyboard focus."""
    browser.find_by_css(".selectize-input").click()
def type_in_search(browser, text):
    """Type *text* into the selectize input paired with the #search element."""
    selectize_control = browser.find_by_css("#search + .selectize-control")[0]
    search_input = selectize_control.find_by_css("input")[0]
    search_input.value = text
|
qdamian/depict
|
test/system/features/steps/when.py
|
Python
|
gpl-3.0
| 1,532
|
[
"VisIt"
] |
665c615576a74a37b0cad21267dd37028abd39bdc2872dfe29a91a8bce9aeb42
|
# -*- coding: utf-8 -*-
# @Author: tasdik
# @Date: 2016-04-11 23:50:46
# @Last Modified by: Tasdik Rahman
# @Last Modified time: 2016-04-12 14:31:45
# @GPLv3 License
# @http://tasdikrahman.me
# @https://github.com/tasdikrahman
# Fixture corpus: a benign ("ham") message used by the classifier tests.
HAM_TEXT_2= \
"""
Bro. Hope you are fine.
Hows the work going on ? Can you send me some updates on it.
And are you free tomorrow ?
No problem man. But please make sure you are finishing it
by friday night and sending me on on that day itself. As we
have to get it printed on Saturday.
"""
# Fixture corpus: a classic advance-fee spam message.
SPAM_TEXT_1= \
"""
My Dear Friend,
How are you and your family? I hope you all are fine.
My dear I know that this mail will come to you as a surprise, but it's for my
urgent need for a foreign partner that made me to contact you for your sincere
genuine assistance My name is Mr.Herman Hirdiramani, I am a banker by
profession currently holding the post of Director Auditing Department in
the Islamic Development Bank(IsDB)here in Ouagadougou, Burkina Faso.
I got your email information through the Burkina's Chamber of Commerce
and industry on foreign business relations here in Ouagadougou Burkina Faso
I haven'disclose this deal to any body I hope that you will not expose or
betray this trust and confident that I am about to repose on you for the
mutual benefit of our both families.
I need your urgent assistance in transferring the sum of Eight Million,
Four Hundred and Fifty Thousand United States Dollars ($8,450,000:00) into
your account within 14 working banking days This money has been dormant for
years in our bank without claim due to the owner of this fund died along with
his entire family and his supposed next of kin in an underground train crash
since years ago. For your further informations please visit
(http://news.bbc.co.uk/2/hi/5141542.stm)
"""
# Fixture corpus: a second spam sample (fake IMF payment notification).
SPAM_TEXT_2 = \
"""
INTERNATIONAL MONETARY FUND (IMF)
DEPT: WORLD DEBT RECONCILIATION AGENCIES.
ADVISE: YOUR OUTSTANDING PAYMENT NOTIFICATION
Attention
A power of attorney was forwarded to our office this morning by two gentle men,
one of them is an American national and he is MR DAVID DEANE by name while the
other person is MR... JACK MORGAN by name a CANADIAN national.
This gentleman claimed to be your representative, and this power of attorney
stated that you are dead; they brought an account to replace your information
in other to claim your fund of (US$9.7M) which is now lying DORMANT and UNCLAIMED,
below is the new account they have submitted:
BANK.-HSBC CANADA
Vancouver, CANADA
ACCOUNT NO. 2984-0008-66
Be further informed that this power of attorney also stated that you suffered
and died of throat cancer. You are therefore given 24hrs to confirm the truth
in this information, if you are still alive, you are to contact us back
immediately, because we work 24 hrs just to ensure that we monitor all the
activities going on in regards to the transfer of beneficiary? inheritance
and contract payment.
You are to respond immediately for clarifications on this matter as we shall
be available 24 hrs to attend to you and give you the necessary guidelines on
how to ensure that your payment is wired to you immediately.
"""
import os
import unittest
from spammy import Spammy
# The training corpus lives in 'test_data' next to this file.
PATH = os.path.dirname(os.path.abspath(__file__))
CORPUS_DATA = os.path.join(PATH, 'test_data')
# NOTE: training happens at import time; all tests below share this one
# classifier instance.
cl = Spammy(CORPUS_DATA, limit=100)
cl.train()
class TestClassifier(unittest.TestCase):
    """Sanity checks: the trained classifier labels the module fixtures."""

    def test_ham_text_2(self):
        """A legitimate message is labelled 'ham'."""
        self.assertEqual(cl.classify(HAM_TEXT_2), 'ham')

    def test_spam_text_1(self):
        """The first scam fixture is labelled 'spam'."""
        self.assertEqual(cl.classify(SPAM_TEXT_1), 'spam')

    def test_spam_text_2(self):
        """The second scam fixture is labelled 'spam'."""
        self.assertEqual(cl.classify(SPAM_TEXT_2), 'spam')
if __name__ == "__main__":
    # Fixed: the original used the Python 2-only 'print x' statement form,
    # which is a SyntaxError on Python 3.  The function form prints the same
    # single value on both Python 2 and 3.
    # NOTE(review): SPAM_TEXT and HAM_TEXT are not defined in this chunk --
    # presumably module-level fixtures defined earlier in the file; confirm
    # they exist (the test methods use SPAM_TEXT_1/HAM_TEXT_2 instead).
    print(cl.classify(SPAM_TEXT))
    print(cl.classify(HAM_TEXT))
    unittest.main()
|
prodicus/spammy
|
tests/test_classifier.py
|
Python
|
gpl-3.0
| 4,039
|
[
"VisIt"
] |
3d1f48fd511cdf27e06edbdefd1a3a041ef407628f2c4463eb1bbcef61c80522
|
# Copyright 2010-2011 Canonical Ltd. This software is licensed under the
# GNU Lesser General Public License version 3 (see the file LICENSE).
import inspect
import logging
from configglue.schema import (
BoolOption,
Section,
DictOption,
IntOption,
ListOption,
Schema,
StringOption,
TupleOption,
)
from django import get_version, VERSION
from django.conf import global_settings
from django_configglue.utils import get_project_settings
# As in django.conf.global_settings:
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged (marker for strings translated later).

    Defined with ``def`` rather than ``lambda`` (PEP 8 E731) so the function
    has a proper ``__name__`` and a useful traceback frame.
    """
    return s
class UpperCaseDictOption(DictOption):
    """A DictOption whose parsed keys are normalised to upper case."""

    def parse(self, section, parser=None, raw=False):
        # Delegate the actual parsing, then rewrite every key in upper case.
        parsed = super(UpperCaseDictOption, self).parse(
            section, parser, raw)
        return dict((key.upper(), value) for key, value in parsed.items())
def derivate_django_schema(schema, exclude=None):
    """Return a modified version of a schema.

    The source schema *must* have a 'version' attribute and a 'django'
    section.  The result is almost a copy of the original schema, except
    that any option of the 'django' section named in *exclude* is dropped.
    """
    # Nothing to exclude: the original schema is returned untouched.
    if not exclude:
        return schema

    # Subclass the schema, carrying over its version marker.
    derived = type(schema.__name__, (schema,), {'version': schema.version})

    # Keep every option of the 'django' section that is not excluded.
    kept = dict(
        (option.name, option)
        for option in schema().django.options()
        if option.name not in exclude)

    # Rebuild the 'django' section from the retained options only.
    derived.django = type('django', (Section,), kept)
    return derived
class BaseDjangoSchema(Schema):
    """Configglue schema describing the Django 1.0 settings.

    Each option mirrors the setting of the same (upper-cased) name in
    ``django.conf.global_settings``; defaults and help texts follow that
    module.  Fixed in this revision: two typos in user-visible help strings
    ("Wether" -> "Whether", "sesion" -> "session").
    """

    version = '1.0'

    # Sections
    class django(Section):

        ################
        # CORE         #
        ################

        debug = BoolOption(default=True)
        template_debug = BoolOption(default=True)
        debug_propagate_exceptions = BoolOption(default=False,
            help="Whether the framework should propagate raw exceptions "
                 "rather than catching them. This is useful under some "
                 "testing situations and should never be used on a live site.")
        use_etags = BoolOption(default=False,
            help="Whether to use the 'Etag' header. This saves bandwidth but "
                 "slows down performance.")
        admins = ListOption(item=TupleOption(length=2), default=[],
            help="People who get code error notifications. In the format "
                 "(('Full Name', 'email@domain.com'), "
                 "('Full Name', 'anotheremail@domain.com'))")
        internal_ips = TupleOption(default=(),
            help="Tuple of IP addresses, as strings, that see debug comments, "
                 "when DEBUG is true and receive x-headers")
        time_zone = StringOption(default='America/Chicago', null=True,
            help="Local time zone for this installation. All choices can be "
                 "found here: "
                 "http://en.wikipedia.org/wiki/List_of_tz_zones_by_name "
                 "(although not all systems may support all possibilities)")
        language_code = StringOption(default='en-us',
            help="Language code for this installation. All choices can be "
                 "found here: "
                 "http://www.i18nguy.com/unicode/language-identifiers.html")
        languages = ListOption(
            item=TupleOption(length=2),
            default=[('ar', gettext_noop('Arabic')),
                     ('bn', gettext_noop('Bengali')),
                     ('bg', gettext_noop('Bulgarian')),
                     ('ca', gettext_noop('Catalan')),
                     ('cs', gettext_noop('Czech')),
                     ('cy', gettext_noop('Welsh')),
                     ('da', gettext_noop('Danish')),
                     ('de', gettext_noop('German')),
                     ('el', gettext_noop('Greek')),
                     ('en', gettext_noop('English')),
                     ('es', gettext_noop('Spanish')),
                     ('et', gettext_noop('Estonian')),
                     ('es-ar', gettext_noop('Argentinean Spanish')),
                     ('eu', gettext_noop('Basque')),
                     ('fa', gettext_noop('Persian')),
                     ('fi', gettext_noop('Finnish')),
                     ('fr', gettext_noop('French')),
                     ('ga', gettext_noop('Irish')),
                     ('gl', gettext_noop('Galician')),
                     ('hu', gettext_noop('Hungarian')),
                     ('he', gettext_noop('Hebrew')),
                     ('hi', gettext_noop('Hindi')),
                     ('hr', gettext_noop('Croatian')),
                     ('is', gettext_noop('Icelandic')),
                     ('it', gettext_noop('Italian')),
                     ('ja', gettext_noop('Japanese')),
                     ('ka', gettext_noop('Georgian')),
                     ('ko', gettext_noop('Korean')),
                     ('km', gettext_noop('Khmer')),
                     ('kn', gettext_noop('Kannada')),
                     ('lv', gettext_noop('Latvian')),
                     ('lt', gettext_noop('Lithuanian')),
                     ('mk', gettext_noop('Macedonian')),
                     ('nl', gettext_noop('Dutch')),
                     ('no', gettext_noop('Norwegian')),
                     ('pl', gettext_noop('Polish')),
                     ('pt', gettext_noop('Portuguese')),
                     ('pt-br', gettext_noop('Brazilian Portuguese')),
                     ('ro', gettext_noop('Romanian')),
                     ('ru', gettext_noop('Russian')),
                     ('sk', gettext_noop('Slovak')),
                     ('sl', gettext_noop('Slovenian')),
                     ('sr', gettext_noop('Serbian')),
                     ('sv', gettext_noop('Swedish')),
                     ('ta', gettext_noop('Tamil')),
                     ('te', gettext_noop('Telugu')),
                     ('th', gettext_noop('Thai')),
                     ('tr', gettext_noop('Turkish')),
                     ('uk', gettext_noop('Ukrainian')),
                     ('zh-cn', gettext_noop('Simplified Chinese')),
                     ('zh-tw', gettext_noop('Traditional Chinese'))],
            help="Languages we provide translations for, out of the box. "
                 "The language name should be the utf-8 encoded local name "
                 "for the language")
        languages_bidi = TupleOption(default=('he', 'ar', 'fa'),
            help="Languages using BiDi (right-to-left) layout")
        use_i18n = BoolOption(default=True,
            help="If you set this to False, Django will make some "
                 "optimizations so as not to load the internationalization "
                 "machinery")
        locale_paths = ListOption(item=StringOption())
        language_cookie_name = StringOption(default='django_language')
        managers = ListOption(item=TupleOption(length=2), default=[],
            help="Not-necessarily-technical managers of the site. They get "
                 "broken link notifications and other various e-mails")
        default_content_type = StringOption(default='text/html',
            help="Default content type and charset to use for all "
                 "HttpResponse objects, if a MIME type isn't manually "
                 "specified. These are used to construct the Content-Type "
                 "header")
        default_charset = StringOption(default='utf-8')
        file_charset = StringOption(default='utf-8',
            help="Encoding of files read from disk (template and initial "
                 "SQL files)")
        server_email = StringOption(
            help="E-mail address that error messages come from",
            default='root@localhost')
        send_broken_link_emails = BoolOption(default=False,
            help="Whether to send broken-link e-mails")
        database_engine = StringOption(default='',
            help="'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'"
                 " or 'oracle'")
        database_name = StringOption(default='',
            help="Or path to database file if using sqlite3")
        database_user = StringOption(default='',
            help="Not used with sqlite3")
        database_password = StringOption(default='',
            help="Not used with sqlite3")
        database_host = StringOption(default='',
            help="Set to empty string for localhost. Not used with sqlite3")
        database_port = StringOption(default='',
            help="Set to empty string for default. Not used with sqlite3")
        database_options = DictOption(
            help="Set to empty dictionary for default")
        email_host = StringOption(default='localhost',
            help="Host for sending e-mail")
        email_port = IntOption(default=25,
            help="Port for sending e-mail")
        email_host_user = StringOption(default='',
            help="Optional SMTP authentication information for EMAIL_HOST")
        email_host_password = StringOption(default='',
            help="Optional SMTP authentication information for EMAIL_HOST")
        email_use_tls = BoolOption(default=False,
            help="Optional SMTP authentication information for EMAIL_HOST")
        installed_apps = ListOption(item=StringOption(),
            default=['django.contrib.auth',
                     'django.contrib.contenttypes',
                     'django.contrib.sessions',
                     'django.contrib.sites'],
            help="List of strings representing installed apps")
        template_dirs = ListOption(item=StringOption(),
            help="List of locations of the template source files, in search "
                 "order")
        template_loaders = ListOption(item=StringOption(),
            default=[
                'django.template.loaders.filesystem.load_template_source',
                'django.template.loaders.app_directories.load_template_source',
            ],
            help="List of callables that know how to import templates from "
                 "various sources")
        template_context_processors = ListOption(
            item=StringOption(),
            default=['django.core.context_processors.auth',
                     'django.core.context_processors.debug',
                     'django.core.context_processors.i18n',
                     'django.core.context_processors.media'],
            help="List of processors used by RequestContext to populate the "
                 "context. Each one should be a callable that takes the "
                 "request object as its only parameter and returns a "
                 "dictionary to add to the context")
        template_string_if_invalid = StringOption(default='',
            help="Output to use in template system for invalid "
                 "(e.g. misspelled) variables")
        admin_media_prefix = StringOption(default='/media/',
            help="URL prefix for admin media -- CSS, JavaScript and images. "
                 "Make sure to use a trailing slash. "
                 "Examples: 'http://foo.com/media/', '/media/'")
        default_from_email = StringOption(
            default='webmaster@localhost',
            help="Default e-mail address to use for various automated "
                 "correspondence from the site managers")
        email_subject_prefix = StringOption(default='[Django] ',
            help="Subject-line prefix for email messages send with "
                 "django.core.mail.mail_admins or ...mail_managers. Make sure "
                 "to include the trailing space")
        append_slash = BoolOption(default=True,
            help="Whether to append trailing slashes to URLs")
        prepend_www = BoolOption(default=False,
            help="Whether to prepend the 'www.' subdomain to URLs that "
                 "don't have it")
        force_script_name = StringOption(null=True,
            help="Override the server-derived value of SCRIPT_NAME")
        disallowed_user_agents = ListOption(
            item=StringOption(),
            default=[],
            help="List of compiled regular expression objects representing "
                 "User-Agent strings that are not allowed to visit any page, "
                 "systemwide. Use this for bad robots/crawlers")
        absolute_url_overrides = DictOption()
        allowed_include_roots = TupleOption(
            help="Tuple of strings representing allowed prefixes for the "
                 "{% ssi %} tag")
        admin_for = ListOption(item=StringOption(),
            help="If this is a admin settings module, this should be a list "
                 "of settings modules (in the format 'foo.bar.baz') for which "
                 "this admin is an admin")
        ignorable_404_starts = ListOption(item=StringOption(),
            default=['/cgi-bin/', '/_vti_bin', '/_vti_inf'],
            help="404s that may be ignored")
        ignorable_404_ends = ListOption(item=StringOption(),
            default=['mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi',
                     'favicon.ico', '.php'])
        secret_key = StringOption(raw=True, default='',
            help="A secret key for this particular Django installation. Used "
                 "in secret-key hashing algorithms. Set this in your "
                 "settings, or Django will complain loudly")
        jing_path = StringOption(default='/usr/bin/jing',
            help="Path to the 'jing' executable -- needed to validate "
                 "XMLFields")
        default_file_storage = StringOption(
            default='django.core.files.storage.FileSystemStorage',
            help="Default file storage mechanism that holds media")
        media_root = StringOption(default='',
            help="Absolute path to the directory that holds media")
        media_url = StringOption(default='',
            help="URL that handles the media served from MEDIA_ROOT")
        file_upload_handlers = ListOption(item=StringOption(),
            default=[
                'django.core.files.uploadhandler.MemoryFileUploadHandler',
                'django.core.files.uploadhandler.TemporaryFileUploadHandler'],
            help="List of upload handler classes to be applied in order")
        file_upload_max_memory_size = IntOption(default=2621440,
            help="Maximum size, in bytes, of a request before it will be "
                 "streamed to the file system instead of into memory")
        file_upload_temp_dir = StringOption(null=True,
            help="Directory in which upload streamed files will be "
                 "temporarily saved. A value of `None` will make Django use "
                 "the operating system's default temporary directory (i.e. "
                 "'/tmp' on *nix systems)")
        file_upload_permissions = StringOption(null=True,
            help="The numeric mode to set newly-uploaded files to. The value "
                 "should be a mode you'd pass directly to os.chmod; "
                 "see http://docs.python.org/lib/os-file-dir.html")
        date_format = StringOption(default='N j, Y',
            help="Default formatting for date objects. See all available "
                 "format strings here: "
                 "http://docs.djangoproject.com/en/dev/ref/templates/builtins/"
                 "#now")
        datetime_format = StringOption(default='N j, Y, P',
            help="Default formatting for datetime objects. See all available "
                 "format strings here: "
                 "http://docs.djangoproject.com/en/dev/ref/templates/builtins/"
                 "#now")
        time_format = StringOption(default='P',
            help="Default formatting for time objects. See all available "
                 "format strings here: "
                 "http://docs.djangoproject.com/en/dev/ref/templates/builtins/"
                 "#now")
        year_month_format = StringOption(default='F Y',
            help="Default formatting for date objects when only the year and "
                 "month are relevant. See all available format strings here: "
                 "http://docs.djangoproject.com/en/dev/ref/templates/builtins/"
                 "#now")
        month_day_format = StringOption(default='F j',
            help="Default formatting for date objects when only the month and "
                 "day are relevant. See all available format strings here: "
                 "http://docs.djangoproject.com/en/dev/ref/templates/builtins/"
                 "#now")
        transactions_managed = BoolOption(default=False,
            help="Do you want to manage transactions manually? "
                 "Hint: you really don't!")
        url_validator_user_agent = StringOption(
            default="Django/%s (http://www.djangoproject.com)" % get_version(),
            help="The User-Agent string to use when checking for URL validity "
                 "through the isExistingURL validator")
        default_tablespace = StringOption(default='',
            help="The tablespaces to use for each model when not "
                 "specified otherwise")
        default_index_tablespace = StringOption(default='',
            help="The tablespaces to use for each model when not "
                 "specified otherwise")

        ##############
        # MIDDLEWARE #
        ##############

        middleware_classes = ListOption(item=StringOption(),
            default=[
                'django.middleware.common.CommonMiddleware',
                'django.contrib.sessions.middleware.SessionMiddleware',
                'django.contrib.auth.middleware.AuthenticationMiddleware'],
            help="List of middleware classes to use. Order is important; in "
                 "the request phase, these middleware classes will be applied "
                 "in the order given, and in the response phase the "
                 "middleware will be applied in reverse order")

        ############
        # SESSIONS #
        ############

        session_cookie_name = StringOption(default='sessionid',
            help="Cookie name")
        session_cookie_age = IntOption(default=60 * 60 * 24 * 7 * 2,
            help="Age of cookie, in seconds (default: 2 weeks)")
        session_cookie_domain = StringOption(null=True,
            help="A string like '.lawrence.com', or None for standard "
                 "domain cookie")
        # Typo fix: "Wether" -> "Whether" in the user-visible help text.
        session_cookie_secure = BoolOption(default=False,
            help="Whether the session cookie should be secure (https:// only)")
        # Typo fix: "sesion" -> "session" in the user-visible help text.
        session_cookie_path = StringOption(default='/',
            help="The path of the session cookie")
        session_save_every_request = BoolOption(default=False,
            help="Whether to save the session data on every request")
        session_expire_at_browser_close = BoolOption(default=False,
            help="Whether a user's session cookie expires when the Web "
                 "browser is closed")
        session_engine = StringOption(
            default='django.contrib.sessions.backends.db',
            help="The module to store session data")
        session_file_path = StringOption(null=True,
            help="Directory to store session files if using the file session "
                 "module. If None, the backend will use a sensible default")

        #########
        # CACHE #
        #########

        cache_backend = StringOption(default='locmem://',
            help="The cache backend to use. See the docstring in "
                 "django.core.cache for the possible values")
        cache_middleware_key_prefix = StringOption(default='')
        cache_middleware_seconds = IntOption(default=600)

        ####################
        # COMMENTS         #
        ####################

        comments_allow_profanities = BoolOption(default=False)
        profanities_list = ListOption(item=StringOption(),
            default=['asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook',
                     'nigger', 'shit'],
            help="The profanities that will trigger a validation error in the "
                 "'hasNoProfanities' validator. All of these should be in "
                 "lowercase")

        ##################
        # AUTHENTICATION #
        ##################

        authentication_backends = ListOption(
            item=StringOption(),
            default=['django.contrib.auth.backends.ModelBackend'])
        login_url = StringOption(default='/accounts/login/')
        logout_url = StringOption(default='/accounts/logout/')
        login_redirect_url = StringOption(default='/accounts/profile/')
        password_reset_timeout_days = IntOption(default=3,
            help="The number of days a password reset link is valid for")

        ###########
        # TESTING #
        ###########

        test_runner = StringOption(
            default='django.test.simple.run_tests',
            help="The name of the method to use to invoke the test suite")
        test_database_name = StringOption(null=True,
            help="The name of the database to use for testing purposes. "
                 "If None, a name of 'test_' + DATABASE_NAME will be assumed")
        test_database_charset = StringOption(null=True,
            help="Strings used to set the character set and collation order "
                 "for the test database. These values are passed literally to "
                 "the server, so they are backend-dependent. If None, no "
                 "special settings are sent (system defaults are used)")
        test_database_collation = StringOption(null=True,
            help="Strings used to set the character set and collation order "
                 "for the test database. These values are passed literally to "
                 "the server, so they are backend-dependent. If None, no "
                 "special settings are sent (system defaults are used)")

        ############
        # FIXTURES #
        ############

        fixture_dirs = ListOption(item=StringOption(),
            help="The list of directories to search for fixtures")

        ####################
        # PROJECT TEMPLATE #
        ####################

        site_id = IntOption(default=1)
        # use a slightly different default than in the project settings
        # template as it includes the {{ project_name }} variable
        # not relying on that variable makes more sense in this case
        root_urlconf = StringOption(default='urls')
# Base for the 1.1 schema: the 1.0 schema minus the 'jing_path' option.
Django11Base = derivate_django_schema(
    BaseDjangoSchema, exclude=['jing_path'])
class Django11Schema(Django11Base):
    """Configglue schema for Django 1.1 settings.

    Inherits the 1.0 options (without 'jing_path') and refreshes the
    default LANGUAGES list for the 1.1 release.
    """
    version = '1.1'

    # sections
    class django(Django11Base.django):

        ################
        # CORE         #
        ################

        # update default value
        languages = ListOption(
            item=TupleOption(length=2),
            help="Languages we provide translations for, out of the box. "
                 "The language name should be the utf-8 encoded local name "
                 "for the language",
            default=[
                ('ar', gettext_noop('Arabic')),
                ('bg', gettext_noop('Bulgarian')),
                ('bn', gettext_noop('Bengali')),
                ('bs', gettext_noop('Bosnian')),
                ('ca', gettext_noop('Catalan')),
                ('cs', gettext_noop('Czech')),
                ('cy', gettext_noop('Welsh')),
                ('da', gettext_noop('Danish')),
                ('de', gettext_noop('German')),
                ('el', gettext_noop('Greek')),
                ('en', gettext_noop('English')),
                ('es', gettext_noop('Spanish')),
                ('es-ar', gettext_noop('Argentinean Spanish')),
                ('et', gettext_noop('Estonian')),
                ('eu', gettext_noop('Basque')),
                ('fa', gettext_noop('Persian')),
                ('fi', gettext_noop('Finnish')),
                ('fr', gettext_noop('French')),
                ('fy-nl', gettext_noop('Frisian')),
                ('ga', gettext_noop('Irish')),
                ('gl', gettext_noop('Galician')),
                ('he', gettext_noop('Hebrew')),
                ('hi', gettext_noop('Hindi')),
                ('hr', gettext_noop('Croatian')),
                ('hu', gettext_noop('Hungarian')),
                ('is', gettext_noop('Icelandic')),
                ('it', gettext_noop('Italian')),
                ('ja', gettext_noop('Japanese')),
                ('ka', gettext_noop('Georgian')),
                ('km', gettext_noop('Khmer')),
                ('kn', gettext_noop('Kannada')),
                ('ko', gettext_noop('Korean')),
                ('lt', gettext_noop('Lithuanian')),
                ('lv', gettext_noop('Latvian')),
                ('mk', gettext_noop('Macedonian')),
                ('nl', gettext_noop('Dutch')),
                ('no', gettext_noop('Norwegian')),
                ('pl', gettext_noop('Polish')),
                ('pt', gettext_noop('Portuguese')),
                ('pt-br', gettext_noop('Brazilian Portuguese')),
                ('ro', gettext_noop('Romanian')),
                ('ru', gettext_noop('Russian')),
                ('sk', gettext_noop('Slovak')),
                ('sl', gettext_noop('Slovenian')),
                ('sq', gettext_noop('Albanian')),
                ('sr', gettext_noop('Serbian')),
                ('sr-latn', gettext_noop('Serbian Latin')),
                ('sv', gettext_noop('Swedish')),
                ('ta', gettext_noop('Tamil')),
                ('te', gettext_noop('Telugu')),
                ('th', gettext_noop('Thai')),
                ('tr', gettext_noop('Turkish')),
                ('uk', gettext_noop('Ukrainian')),
                ('zh-cn', gettext_noop('Simplified Chinese')),
                ('zh-tw', gettext_noop('Traditional Chinese')),
            ])
class Django12Schema(Django11Schema):
    """Configglue schema for Django 1.2 settings.

    Extends the 1.1 schema with the 1.2 additions (multi-database
    'databases' dict, l10n, CSRF and messages settings) and refreshes
    defaults that changed in that release.
    """
    version = '1.2'

    # sections
    class django(Django11Schema.django):

        ################
        # CORE         #
        ################

        # update default value
        languages = ListOption(
            item=TupleOption(length=2),
            help="Languages we provide translations for, out of the box. "
                 "The language name should be the utf-8 encoded local name "
                 "for the language",
            default=[
                ('ar', gettext_noop('Arabic')),
                ('bg', gettext_noop('Bulgarian')),
                ('bn', gettext_noop('Bengali')),
                ('bs', gettext_noop('Bosnian')),
                ('ca', gettext_noop('Catalan')),
                ('cs', gettext_noop('Czech')),
                ('cy', gettext_noop('Welsh')),
                ('da', gettext_noop('Danish')),
                ('de', gettext_noop('German')),
                ('el', gettext_noop('Greek')),
                ('en', gettext_noop('English')),
                ('en-gb', gettext_noop('British English')),
                ('es', gettext_noop('Spanish')),
                ('es-ar', gettext_noop('Argentinian Spanish')),
                ('et', gettext_noop('Estonian')),
                ('eu', gettext_noop('Basque')),
                ('fa', gettext_noop('Persian')),
                ('fi', gettext_noop('Finnish')),
                ('fr', gettext_noop('French')),
                ('fy-nl', gettext_noop('Frisian')),
                ('ga', gettext_noop('Irish')),
                ('gl', gettext_noop('Galician')),
                ('he', gettext_noop('Hebrew')),
                ('hi', gettext_noop('Hindi')),
                ('hr', gettext_noop('Croatian')),
                ('hu', gettext_noop('Hungarian')),
                ('id', gettext_noop('Indonesian')),
                ('is', gettext_noop('Icelandic')),
                ('it', gettext_noop('Italian')),
                ('ja', gettext_noop('Japanese')),
                ('ka', gettext_noop('Georgian')),
                ('km', gettext_noop('Khmer')),
                ('kn', gettext_noop('Kannada')),
                ('ko', gettext_noop('Korean')),
                ('lt', gettext_noop('Lithuanian')),
                ('lv', gettext_noop('Latvian')),
                ('mk', gettext_noop('Macedonian')),
                ('ml', gettext_noop('Malayalam')),
                ('mn', gettext_noop('Mongolian')),
                ('nl', gettext_noop('Dutch')),
                ('no', gettext_noop('Norwegian')),
                ('nb', gettext_noop('Norwegian Bokmal')),
                ('nn', gettext_noop('Norwegian Nynorsk')),
                ('pl', gettext_noop('Polish')),
                ('pt', gettext_noop('Portuguese')),
                ('pt-br', gettext_noop('Brazilian Portuguese')),
                ('ro', gettext_noop('Romanian')),
                ('ru', gettext_noop('Russian')),
                ('sk', gettext_noop('Slovak')),
                ('sl', gettext_noop('Slovenian')),
                ('sq', gettext_noop('Albanian')),
                ('sr', gettext_noop('Serbian')),
                ('sr-latn', gettext_noop('Serbian Latin')),
                ('sv', gettext_noop('Swedish')),
                ('ta', gettext_noop('Tamil')),
                ('te', gettext_noop('Telugu')),
                ('th', gettext_noop('Thai')),
                ('tr', gettext_noop('Turkish')),
                ('uk', gettext_noop('Ukrainian')),
                ('vi', gettext_noop('Vietnamese')),
                ('zh-cn', gettext_noop('Simplified Chinese')),
                ('zh-tw', gettext_noop('Traditional Chinese')),
            ])
        use_l10n = BoolOption(
            default=True,
            help="If you set this to False, Django will not format dates, "
                 "numbers and calendars according to the current locale")
        # New in 1.2: per-alias database configuration; keys are normalised
        # to upper case by UpperCaseDictOption (ENGINE, NAME, ...).
        databases = DictOption(
            item=UpperCaseDictOption(spec={
                'engine': StringOption(default='django.db.backends.'),
                'name': StringOption(),
                'user': StringOption(),
                'password': StringOption(),
                'host': StringOption(),
                'port': StringOption(),
            }),
            default={
                'default': {
                    'ENGINE': 'django.db.backends.',
                    'NAME': '',
                    'USER': '',
                    'PASSWORD': '',
                    'HOST': '',
                    'PORT': '',
                }
            })
        database_routers = ListOption(
            item=StringOption(),
            help="Classes used to implement db routing behaviour")
        email_backend = StringOption(
            default='django.core.mail.backends.smtp.EmailBackend',
            help="The email backend to use. For possible shortcuts see "
                 "django.core.mail. The default is to use the SMTP backend. "
                 "Third party backends can be specified by providing a Python "
                 "path to a module that defines an EmailBackend class.")
        installed_apps = ListOption(item=StringOption(),
            help="List of strings representing installed apps",
            default=[
                'django.contrib.auth',
                'django.contrib.contenttypes',
                'django.contrib.sessions',
                'django.contrib.sites',
                'django.contrib.messages',
            ])
        template_loaders = ListOption(item=StringOption(),
            help="List of callables that know how to import templates from "
                 "various sources",
            default=[
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ])
        template_context_processors = ListOption(
            item=StringOption(),
            help="List of processors used by RequestContext to populate the "
                 "context. Each one should be a callable that takes the "
                 "request object as its only parameter and returns a "
                 "dictionary to add to the context",
            default=[
                'django.contrib.auth.context_processors.auth',
                'django.core.context_processors.debug',
                'django.core.context_processors.i18n',
                'django.core.context_processors.media',
                'django.contrib.messages.context_processors.messages',
            ])
        use_x_forwarded_host = BoolOption(default=False,
            help="A boolean that specifies whether to use the "
                 "X-Forwarded-Host header in preference to the Host header. "
                 "This should only be enabled if a proxy which sets this "
                 "header is in use.")
        format_module_path = StringOption(
            null=True, default=None,
            help="Python module path where user will place custom format "
                 "definition. The directory where this setting is pointing "
                 "should contain subdirectories named as the locales, "
                 "containing a formats.py file")
        short_date_format = StringOption(
            default='m/d/Y',
            help="Default short formatting for date objects")
        short_datetime_format = StringOption(
            default='m/d/Y P',
            help="Default short formatting for datetime objects")
        date_input_formats = ListOption(
            item=StringOption(),
            default=[
                # '2006-10-25', '10/25/2006', '10/25/06'
                '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',
                # 'Oct 25 2006', 'Oct 25, 2006'
                '%b %d %Y', '%b %d, %Y',
                # '25 Oct 2006', '25 Oct, 2006'
                '%d %b %Y', '%d %b, %Y',
                # 'October 25 2006', 'October 25, 2006'
                '%B %d %Y', '%B %d, %Y',
                # '25 October 2006', '25 October, 2006'
                '%d %B %Y', '%d %B, %Y',
            ],
            help="Default formats to be used when parsing dates from input "
                 "boxes, in order")
        time_input_formats = ListOption(
            item=StringOption(),
            default=[
                '%H:%M:%S',  # '14:30:59'
                '%H:%M',  # '14:30'
            ],
            help="Default formats to be used when parsing times from input "
                 "boxes, in order")
        datetime_input_formats = ListOption(
            item=StringOption(),
            default=[
                '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
                '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
                '%Y-%m-%d',  # '2006-10-25'
                '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
                '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
                '%m/%d/%Y',  # '10/25/2006'
                '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
                '%m/%d/%y %H:%M',  # '10/25/06 14:30'
                '%m/%d/%y',  # '10/25/06'
            ],
            help="Default formats to be used when parsing dates and times "
                 "from input boxes, in order")
        first_day_of_week = IntOption(
            default=0,
            help="First day of week, to be used on calendars. 0 means Sunday, "
                 "1 means Monday...")
        decimal_separator = StringOption(
            default='.',
            help="Decimal separator symbol")
        use_thousand_separator = BoolOption(
            default=False,
            help="Boolean that sets whether to add thousand separator when "
                 "formatting numbers")
        number_grouping = IntOption(
            default=0,
            help="Number of digits that will be together, when splitting them "
                 "by THOUSAND_SEPARATOR. 0 means no grouping, 3 means "
                 "splitting by thousands...")
        thousand_separator = StringOption(
            default=',',
            help="Thousand separator symbol")

        ##############
        # MIDDLEWARE #
        ##############

        middleware_classes = ListOption(item=StringOption(),
            help="List of middleware classes to use. Order is important; in "
                 "the request phase, these middleware classes will be applied "
                 "in the order given, and in the response phase the "
                 "middleware will be applied in reverse order",
            default=[
                'django.middleware.common.CommonMiddleware',
                'django.contrib.sessions.middleware.SessionMiddleware',
                'django.middleware.csrf.CsrfViewMiddleware',
                'django.contrib.auth.middleware.AuthenticationMiddleware',
                'django.contrib.messages.middleware.MessageMiddleware',
            ])

        ########
        # CSRF #
        ########

        csrf_failure_view = StringOption(
            default='django.views.csrf.csrf_failure',
            help="Dotted path to callable to be used as view when a request "
                 "is rejected by the CSRF middleware")
        csrf_cookie_name = StringOption(
            default='csrftoken',
            help="Name for CSRF cookie")
        csrf_cookie_domain = StringOption(
            null=True,
            help="Domain for CSRF cookie")

        ############
        # MESSAGES #
        ############

        message_storage = StringOption(
            default='django.contrib.messages.storage.user_messages.'
                    'LegacyFallbackStorage',
            help="Class to be used as messages backend")

        ###########
        # TESTING #
        ###########

        test_runner = StringOption(
            default='django.test.simple.DjangoTestSuiteRunner',
            help="The name of the class to use to run the test suite")
# Base for the 1.3 schema: the 1.2 schema minus the single 'cache_backend'
# option (the 1.3 schema defines a 'caches' dict option in its place).
Django13Base = derivate_django_schema(
    Django12Schema,
    exclude=[
        'cache_backend',
    ])
class Django13Schema(Django13Base):
version = '1.3'
class django(Django13Base.django):
# update default value
languages = ListOption(
item=TupleOption(length=2),
help="Languages we provide translations for, out of the box. "
"The language name should be the utf-8 encoded local name "
"for the language",
default=[
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
])
installed_apps = ListOption(item=StringOption(),
help="List of strings representing installed apps",
default=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
])
template_context_processors = ListOption(
item=StringOption(),
help="List of processors used by RequestContext to populate the "
"context. Each one should be a callable that takes the "
"request object as its only parameter and returns a "
"dictionary to add to the context",
default=[
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
])
static_root = StringOption(
default='',
help='Absolute path to the directory that holds static files.')
static_url = StringOption(
null=True, default='/static/',
help='URL that handles the static files served from STATIC_ROOT.')
############
# SESSIONS #
############
session_cookie_httponly = BoolOption(
default=False,
help="Whether to use the non-RFC standard htt pOnly flag (IE, "
"FF3+, others)")
#########
# CACHE #
#########
caches = DictOption(
item=UpperCaseDictOption(spec={
'backend': StringOption(),
'location': StringOption()})
)
cache_middleware_alias = StringOption(default='default')
############
# COMMENTS #
############
profanities_list = ListOption(item=StringOption(),
default=[],
help="The profanities that will trigger a validation error in the "
"'hasNoProfanities' validator. All of these should be in "
"lowercase")
comments_banned_users_group = StringOption(
null=True,
help="The group ID that designates which users are banned. "
"Set to None if you're not using it")
comments_moderators_group = StringOption(
null=True,
help="The group ID that designates which users can moderate "
"comments. Set to None if you're not using it")
comments_sketchy_users_group = StringOption(
null=True,
help="The group ID that designates the users whose comments "
"should be e-mailed to MANAGERS. Set to None if you're not "
"using it")
comments_first_few = IntOption(
default=0,
help="The system will e-mail MANAGERS the first "
"COMMENTS_FIRST_FEW comments by each user. Set this to 0 if "
"you want to disable it")
banned_ips = TupleOption(
help="A tuple of IP addresses that have been banned from "
"participating in various Django-powered features")
###########
# LOGGING #
###########
logging_config = StringOption(null=True,
default='django.utils.log.dictConfig',
help='The callable to use to configure logging')
logging = DictOption(
spec={
'version': IntOption(default=1),
'formatters': DictOption(
item=DictOption(
spec={
'format': StringOption(null=True),
'datefmt': StringOption(null=True)})),
'filters': DictOption(
item=DictOption(
spec={'name': StringOption()})),
'handlers': DictOption(
item=DictOption(
spec={
'class': StringOption(fatal=True),
'level': StringOption(),
'formatter': StringOption(),
'filters': StringOption()})),
'loggers': DictOption(
item=DictOption(
spec={
'level': StringOption(),
'propagate': BoolOption(),
'filters': ListOption(item=StringOption()),
'handlers': ListOption(item=StringOption()),
})),
'root': DictOption(
spec={
'level': StringOption(),
'filters': ListOption(item=StringOption()),
'handlers': ListOption(item=StringOption()),
}),
'incremental': BoolOption(default=False),
'disable_existing_loggers': BoolOption(default=False),
},
default={
'version': 1,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
},
'disable_existing_loggers': False,
},
help="The default logging configuration. This sends an email to "
"the site admins on every HTTP 500 error. All other records "
"are sent to the bit bucket.")
###############
# STATICFILES #
###############
staticfiles_dirs = ListOption(
item=StringOption(),
help='A list of locations of additional static files')
staticfiles_storage = StringOption(
default='django.contrib.staticfiles.storage.StaticFilesStorage',
help='The default file storage backend used during the build '
'process')
staticfiles_finders = ListOption(
item=StringOption(),
default=[
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
],
help='List of finder classes that know how to find static files '
'in various locations.')
admin_media_prefix = StringOption(default='/static/admin/',
help="URL prefix for admin media -- CSS, JavaScript and images. "
"Make sure to use a trailing slash. "
"Examples: 'htttp://foo.com/media/', '/media/'")
class Django136Schema(Django13Schema):
    """Configuration schema for Django 1.3.6."""
    version = '1.3.6'

    class django(Django13Schema.django):
        # ALLOWED_HOSTS was backported to the 1.3.x series as part of the
        # HTTP Host header poisoning security fix.
        allowed_hosts = ListOption(
            item=StringOption(),
            help="A list of strings representing the host/domain names "
            "that this Django site can serve. This is a security "
            "measure to prevent an attacker from poisoning caches and "
            "password reset emails with links to malicious hosts by "
            "submitting requests with a fake HTTP Host header, which is "
            "possible even under many seemingly-safe webserver "
            "configurations.")
# Django 1.4 dropped a number of 1.3-era settings (the old DATABASE_* style
# options, the comments moderation options, ADMIN_MEDIA_PREFIX, ...), so the
# 1.4 base schema is derived from the 1.3 schema by excluding them.
Django14Base = derivate_django_schema(
    Django13Schema,
    exclude=[
        'admin_media_prefix',
        'ignorable_404_starts',
        'ignorable_404_ends',
        'banned_ips',
        'comments_banned_users_group',
        'comments_moderators_group',
        'comments_sketchy_users_group',
        'comments_first_few',
        'database_engine',
        'database_host',
        'database_name',
        'database_options',
        'database_password',
        'database_port',
        'database_user',
        'test_database_charset',
        'test_database_collation',
        'test_database_name',
    ])
class Django14Schema(Django14Base):
    """Configuration schema for Django 1.4.

    Adds the settings introduced in 1.4 and overrides the options whose
    defaults changed relative to 1.3.
    """
    version = '1.4'

    class django(Django14Base.django):
        # ---- settings new in Django 1.4 --------------------------------
        # NOTE(review): the help text below is missing a space between
        # "object" and "that" (adjacent string literals); left as-is since
        # help strings are runtime data.
        wsgi_application = StringOption(
            help="The full Python path of the WSGI application object"
            "that Django's built-in servers (e.g. runserver) will use.",
            null=True)
        csrf_cookie_secure = BoolOption(
            default=False,
            help='Whether to use a secure cookie for the CSRF '
            'cookie. If this is set to True, the cookie will be marked '
            'as "secure," which means browsers may ensure that the '
            'cookie is only sent under an HTTPS connection.')
        csrf_cookie_path = StringOption(
            default='/',
            help="The path set on the CSRF cookie. This should either "
            "match the URL path of your Django installation or be a "
            "parent of that path.")
        secure_proxy_ssl_header = TupleOption(
            length=2,
            default=None,
            help="A tuple representing a HTTP header/value combination "
            "that signifies a request is secure. This controls the "
            "behavior of the request object's is_secure() method.")
        # Replaces the 1.3-era IGNORABLE_404_STARTS/ENDS pair.
        ignorable_404_urls = ListOption(
            item=StringOption(),
            help="List of compiled regular expression objects "
            "describing URLs that should be ignored when reporting HTTP "
            "404 errors via email (see Error reporting). Use this if "
            "your site does not provide a commonly requested file such "
            "as favicon.ico or robots.txt, or if it gets hammered by "
            "script kiddies.")
        password_hashers = ListOption(
            item=StringOption(),
            help="This is a list of hashing algorithm classes that this "
            "Django installation supports. The first entry in this list "
            "(that is, settings.PASSWORD_HASHERS[0]) will be used to "
            "store passwords, and all the other entries are valid "
            "hashers that can be used to check existing passwords.",
            default=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
                'django.contrib.auth.hashers.BCryptPasswordHasher',
                'django.contrib.auth.hashers.SHA1PasswordHasher',
                'django.contrib.auth.hashers.MD5PasswordHasher',
                'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
                'django.contrib.auth.hashers.CryptPasswordHasher',
            ]
        )
        x_frame_options = StringOption(
            default='SAMEORIGIN',
            help="The default value for the X-Frame-Options header used "
            "by XFrameOptionsMiddleware.")
        use_tz = BoolOption(
            default=True,
            help="A boolean that specifies if datetimes will be timezone-aware"
            " by default or not. If this is set to True, Django will use "
            "timezone-aware datetimes internally. Otherwise, Django will "
            "use naive datetimes in local time.")
        default_exception_reporter_filter = StringOption(
            default='django.views.debug.SafeExceptionReporterFilter',
            help="Default exception reporter filter class to be used if none "
            "has been assigned to the HttpRequest instance yet.")
        signing_backend = StringOption(
            default='django.core.signing.TimestampSigner',
            help="The backend used for signing cookies and other data.")
        # Default embeds the running Django version via get_version().
        url_validator_user_agent = StringOption(
            default=("Django/%s (https://www.djangoproject.com)" %
                get_version()),
            help="The User-Agent string to use when checking for URL validity "
            "through the isExistingURL validator")
        message_storage = StringOption(
            default='django.contrib.messages.storage.fallback.'
            'FallbackStorage',
            help="Class to be used as messages backend")
        # ---- settings whose defaults changed in 1.4 --------------------
        # Same spec as the 1.3 LOGGING option; the default now adds the
        # require_debug_false filter to the mail_admins handler.
        logging = DictOption(
            spec={
                'version': IntOption(default=1),
                'formatters': DictOption(
                    item=DictOption(
                        spec={
                            'format': StringOption(null=True),
                            'datefmt': StringOption(null=True)})),
                'filters': DictOption(
                    item=DictOption(
                        spec={'name': StringOption()})),
                'handlers': DictOption(
                    item=DictOption(
                        spec={
                            'class': StringOption(fatal=True),
                            'level': StringOption(),
                            'formatter': StringOption(),
                            'filters': StringOption()})),
                'loggers': DictOption(
                    item=DictOption(
                        spec={
                            'level': StringOption(),
                            'propagate': BoolOption(),
                            'filters': ListOption(item=StringOption()),
                            'handlers': ListOption(item=StringOption()),
                        })),
                'root': DictOption(
                    spec={
                        'level': StringOption(),
                        'filters': ListOption(item=StringOption()),
                        'handlers': ListOption(item=StringOption()),
                    }),
                'incremental': BoolOption(default=False),
                'disable_existing_loggers': BoolOption(default=False),
            },
            default={
                'version': 1,
                'disable_existing_loggers': False,
                'filters': {
                    'require_debug_false': {
                        '()': 'django.utils.log.RequireDebugFalse',
                    }
                },
                'handlers': {
                    'mail_admins': {
                        'level': 'ERROR',
                        'filters': ['require_debug_false'],
                        'class': 'django.utils.log.AdminEmailHandler'
                    }
                },
                'loggers': {
                    'django.request': {
                        'handlers': ['mail_admins'],
                        'level': 'ERROR',
                        'propagate': True,
                    },
                }
            },
            help="The default logging configuration. This sends an email to "
            "the site admins on every HTTP 500 error. All other records "
            "are sent to the bit bucket.")
        # The tz context processor is new in the default list.
        template_context_processors = ListOption(
            item=StringOption(),
            default=['django.contrib.auth.context_processors.auth',
                'django.core.context_processors.debug',
                'django.core.context_processors.i18n',
                'django.core.context_processors.media',
                'django.core.context_processors.static',
                'django.core.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
            help="List of processors used by RequestContext to populate the "
            "context. Each one should be a callable that takes the "
            "request object as its only parameter and returns a "
            "dictionary to add to the context")
        languages = ListOption(
            item=TupleOption(length=2),
            default=[
                ('ar', gettext_noop('Arabic')),
                ('az', gettext_noop('Azerbaijani')),
                ('bg', gettext_noop('Bulgarian')),
                ('bn', gettext_noop('Bengali')),
                ('bs', gettext_noop('Bosnian')),
                ('ca', gettext_noop('Catalan')),
                ('cs', gettext_noop('Czech')),
                ('cy', gettext_noop('Welsh')),
                ('da', gettext_noop('Danish')),
                ('de', gettext_noop('German')),
                ('el', gettext_noop('Greek')),
                ('en', gettext_noop('English')),
                ('en-gb', gettext_noop('British English')),
                ('eo', gettext_noop('Esperanto')),
                ('es', gettext_noop('Spanish')),
                ('es-ar', gettext_noop('Argentinian Spanish')),
                ('es-mx', gettext_noop('Mexican Spanish')),
                ('es-ni', gettext_noop('Nicaraguan Spanish')),
                ('et', gettext_noop('Estonian')),
                ('eu', gettext_noop('Basque')),
                ('fa', gettext_noop('Persian')),
                ('fi', gettext_noop('Finnish')),
                ('fr', gettext_noop('French')),
                ('fy-nl', gettext_noop('Frisian')),
                ('ga', gettext_noop('Irish')),
                ('gl', gettext_noop('Galician')),
                ('he', gettext_noop('Hebrew')),
                ('hi', gettext_noop('Hindi')),
                ('hr', gettext_noop('Croatian')),
                ('hu', gettext_noop('Hungarian')),
                ('id', gettext_noop('Indonesian')),
                ('is', gettext_noop('Icelandic')),
                ('it', gettext_noop('Italian')),
                ('ja', gettext_noop('Japanese')),
                ('ka', gettext_noop('Georgian')),
                ('kk', gettext_noop('Kazakh')),
                ('km', gettext_noop('Khmer')),
                ('kn', gettext_noop('Kannada')),
                ('ko', gettext_noop('Korean')),
                ('lt', gettext_noop('Lithuanian')),
                ('lv', gettext_noop('Latvian')),
                ('mk', gettext_noop('Macedonian')),
                ('ml', gettext_noop('Malayalam')),
                ('mn', gettext_noop('Mongolian')),
                ('nb', gettext_noop('Norwegian Bokmal')),
                ('ne', gettext_noop('Nepali')),
                ('nl', gettext_noop('Dutch')),
                ('nn', gettext_noop('Norwegian Nynorsk')),
                ('pa', gettext_noop('Punjabi')),
                ('pl', gettext_noop('Polish')),
                ('pt', gettext_noop('Portuguese')),
                ('pt-br', gettext_noop('Brazilian Portuguese')),
                ('ro', gettext_noop('Romanian')),
                ('ru', gettext_noop('Russian')),
                ('sk', gettext_noop('Slovak')),
                ('sl', gettext_noop('Slovenian')),
                ('sq', gettext_noop('Albanian')),
                ('sr', gettext_noop('Serbian')),
                ('sr-latn', gettext_noop('Serbian Latin')),
                ('sv', gettext_noop('Swedish')),
                ('sw', gettext_noop('Swahili')),
                ('ta', gettext_noop('Tamil')),
                ('te', gettext_noop('Telugu')),
                ('th', gettext_noop('Thai')),
                ('tr', gettext_noop('Turkish')),
                ('tt', gettext_noop('Tatar')),
                ('uk', gettext_noop('Ukrainian')),
                ('ur', gettext_noop('Urdu')),
                ('vi', gettext_noop('Vietnamese')),
                ('zh-cn', gettext_noop('Simplified Chinese')),
                ('zh-tw', gettext_noop('Traditional Chinese'))],
            help="Languages we provide translations for, out of the box. "
            "The language name should be the utf-8 encoded local name "
            "for the language")
        # Default flipped from False (1.3) to True.
        # NOTE(review): "htt pOnly" typo is in the runtime help string and
        # is therefore preserved here.
        session_cookie_httponly = BoolOption(
            default=True,
            help="Whether to use the non-RFC standard htt pOnly flag (IE, "
            "FF3+, others)")
        # 1.4 adds the %f (microseconds) variants to the accepted formats.
        datetime_input_formats = ListOption(
            item=StringOption(),
            default=[
                '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
                '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
                '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
                '%Y-%m-%d',  # '2006-10-25'
                '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
                '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
                '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
                '%m/%d/%Y',  # '10/25/2006'
                '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
                '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
                '%m/%d/%y %H:%M',  # '10/25/06 14:30'
                '%m/%d/%y',  # '10/25/06'
            ],
            help="Default formats to be used when parsing dates and times "
            "from input boxes, in order")
class Django144Schema(Django14Schema):
    """Configuration schema for Django 1.4.4."""
    version = '1.4.4'

    class django(Django14Schema.django):
        # ALLOWED_HOSTS backported to 1.4.4 as part of the HTTP Host header
        # poisoning security fix.
        # NOTE(review): unlike the 1.3.6 definition, no item= type is given
        # here -- confirm ListOption's default item type is acceptable.
        allowed_hosts = ListOption(
            help="A list of strings representing the host/domain names "
            "that this Django site can serve. This is a security "
            "measure to prevent an attacker from poisoning caches and "
            "password reset emails with links to malicious hosts by "
            "submitting requests with a fake HTTP Host header, which is "
            "possible even under many seemingly-safe webserver "
            "configurations.")
        # Extends the 1.4 list with af, be, br, es-ve, ia, lb and udm.
        languages = ListOption(
            item=TupleOption(length=2),
            default=[
                ('af', gettext_noop('Afrikaans')),
                ('ar', gettext_noop('Arabic')),
                ('az', gettext_noop('Azerbaijani')),
                ('bg', gettext_noop('Bulgarian')),
                ('be', gettext_noop('Belarusian')),
                ('bn', gettext_noop('Bengali')),
                ('br', gettext_noop('Breton')),
                ('bs', gettext_noop('Bosnian')),
                ('ca', gettext_noop('Catalan')),
                ('cs', gettext_noop('Czech')),
                ('cy', gettext_noop('Welsh')),
                ('da', gettext_noop('Danish')),
                ('de', gettext_noop('German')),
                ('el', gettext_noop('Greek')),
                ('en', gettext_noop('English')),
                ('en-gb', gettext_noop('British English')),
                ('eo', gettext_noop('Esperanto')),
                ('es', gettext_noop('Spanish')),
                ('es-ar', gettext_noop('Argentinian Spanish')),
                ('es-mx', gettext_noop('Mexican Spanish')),
                ('es-ni', gettext_noop('Nicaraguan Spanish')),
                ('es-ve', gettext_noop('Venezuelan Spanish')),
                ('et', gettext_noop('Estonian')),
                ('eu', gettext_noop('Basque')),
                ('fa', gettext_noop('Persian')),
                ('fi', gettext_noop('Finnish')),
                ('fr', gettext_noop('French')),
                ('fy-nl', gettext_noop('Frisian')),
                ('ga', gettext_noop('Irish')),
                ('gl', gettext_noop('Galician')),
                ('he', gettext_noop('Hebrew')),
                ('hi', gettext_noop('Hindi')),
                ('hr', gettext_noop('Croatian')),
                ('hu', gettext_noop('Hungarian')),
                ('ia', gettext_noop('Interlingua')),
                ('id', gettext_noop('Indonesian')),
                ('is', gettext_noop('Icelandic')),
                ('it', gettext_noop('Italian')),
                ('ja', gettext_noop('Japanese')),
                ('ka', gettext_noop('Georgian')),
                ('kk', gettext_noop('Kazakh')),
                ('km', gettext_noop('Khmer')),
                ('kn', gettext_noop('Kannada')),
                ('ko', gettext_noop('Korean')),
                ('lb', gettext_noop('Luxembourgish')),
                ('lt', gettext_noop('Lithuanian')),
                ('lv', gettext_noop('Latvian')),
                ('mk', gettext_noop('Macedonian')),
                ('ml', gettext_noop('Malayalam')),
                ('mn', gettext_noop('Mongolian')),
                ('nb', gettext_noop('Norwegian Bokmal')),
                ('ne', gettext_noop('Nepali')),
                ('nl', gettext_noop('Dutch')),
                ('nn', gettext_noop('Norwegian Nynorsk')),
                ('pa', gettext_noop('Punjabi')),
                ('pl', gettext_noop('Polish')),
                ('pt', gettext_noop('Portuguese')),
                ('pt-br', gettext_noop('Brazilian Portuguese')),
                ('ro', gettext_noop('Romanian')),
                ('ru', gettext_noop('Russian')),
                ('sk', gettext_noop('Slovak')),
                ('sl', gettext_noop('Slovenian')),
                ('sq', gettext_noop('Albanian')),
                ('sr', gettext_noop('Serbian')),
                ('sr-latn', gettext_noop('Serbian Latin')),
                ('sv', gettext_noop('Swedish')),
                ('sw', gettext_noop('Swahili')),
                ('ta', gettext_noop('Tamil')),
                ('te', gettext_noop('Telugu')),
                ('th', gettext_noop('Thai')),
                ('tr', gettext_noop('Turkish')),
                ('tt', gettext_noop('Tatar')),
                ('udm', gettext_noop('Udmurt')),
                ('uk', gettext_noop('Ukrainian')),
                ('ur', gettext_noop('Urdu')),
                ('vi', gettext_noop('Vietnamese')),
                ('zh-cn', gettext_noop('Simplified Chinese')),
                ('zh-tw', gettext_noop('Traditional Chinese')),],
            help="Languages we provide translations for, out of the box. "
            "The language name should be the utf-8 encoded local name "
            "for the language")
class Django145Schema(Django144Schema):
    """Configuration schema for Django 1.4.5."""
    version = '1.4.5'

    class django(Django144Schema.django):
        # Reverts most of the 1.4.4 language additions (af, be, br, es-ve,
        # ia, lb, udm are gone again) while keeping kk, ne, sw and tt.
        languages = ListOption(
            item=TupleOption(length=2),
            default=[
                ('ar', gettext_noop('Arabic')),
                ('az', gettext_noop('Azerbaijani')),
                ('bg', gettext_noop('Bulgarian')),
                ('bn', gettext_noop('Bengali')),
                ('bs', gettext_noop('Bosnian')),
                ('ca', gettext_noop('Catalan')),
                ('cs', gettext_noop('Czech')),
                ('cy', gettext_noop('Welsh')),
                ('da', gettext_noop('Danish')),
                ('de', gettext_noop('German')),
                ('el', gettext_noop('Greek')),
                ('en', gettext_noop('English')),
                ('en-gb', gettext_noop('British English')),
                ('eo', gettext_noop('Esperanto')),
                ('es', gettext_noop('Spanish')),
                ('es-ar', gettext_noop('Argentinian Spanish')),
                ('es-mx', gettext_noop('Mexican Spanish')),
                ('es-ni', gettext_noop('Nicaraguan Spanish')),
                ('et', gettext_noop('Estonian')),
                ('eu', gettext_noop('Basque')),
                ('fa', gettext_noop('Persian')),
                ('fi', gettext_noop('Finnish')),
                ('fr', gettext_noop('French')),
                ('fy-nl', gettext_noop('Frisian')),
                ('ga', gettext_noop('Irish')),
                ('gl', gettext_noop('Galician')),
                ('he', gettext_noop('Hebrew')),
                ('hi', gettext_noop('Hindi')),
                ('hr', gettext_noop('Croatian')),
                ('hu', gettext_noop('Hungarian')),
                ('id', gettext_noop('Indonesian')),
                ('is', gettext_noop('Icelandic')),
                ('it', gettext_noop('Italian')),
                ('ja', gettext_noop('Japanese')),
                ('ka', gettext_noop('Georgian')),
                ('kk', gettext_noop('Kazakh')),
                ('km', gettext_noop('Khmer')),
                ('kn', gettext_noop('Kannada')),
                ('ko', gettext_noop('Korean')),
                ('lt', gettext_noop('Lithuanian')),
                ('lv', gettext_noop('Latvian')),
                ('mk', gettext_noop('Macedonian')),
                ('ml', gettext_noop('Malayalam')),
                ('mn', gettext_noop('Mongolian')),
                ('nb', gettext_noop('Norwegian Bokmal')),
                ('ne', gettext_noop('Nepali')),
                ('nl', gettext_noop('Dutch')),
                ('nn', gettext_noop('Norwegian Nynorsk')),
                ('pa', gettext_noop('Punjabi')),
                ('pl', gettext_noop('Polish')),
                ('pt', gettext_noop('Portuguese')),
                ('pt-br', gettext_noop('Brazilian Portuguese')),
                ('ro', gettext_noop('Romanian')),
                ('ru', gettext_noop('Russian')),
                ('sk', gettext_noop('Slovak')),
                ('sl', gettext_noop('Slovenian')),
                ('sq', gettext_noop('Albanian')),
                ('sr', gettext_noop('Serbian')),
                ('sr-latn', gettext_noop('Serbian Latin')),
                ('sv', gettext_noop('Swedish')),
                ('sw', gettext_noop('Swahili')),
                ('ta', gettext_noop('Tamil')),
                ('te', gettext_noop('Telugu')),
                ('th', gettext_noop('Thai')),
                ('tr', gettext_noop('Turkish')),
                ('tt', gettext_noop('Tatar')),
                ('uk', gettext_noop('Ukrainian')),
                ('ur', gettext_noop('Urdu')),
                ('vi', gettext_noop('Vietnamese')),
                ('zh-cn', gettext_noop('Simplified Chinese')),
                ('zh-tw', gettext_noop('Traditional Chinese')),],
            help="Languages we provide translations for, out of the box. "
            "The language name should be the utf-8 encoded local name "
            "for the language")
# Django 1.5 removed URL_VALIDATOR_USER_AGENT, so the 1.5 base schema is the
# 1.4.5 schema minus that single option.
Django15Base = derivate_django_schema(
    Django145Schema,
    exclude=[
        'url_validator_user_agent',
    ])
class Django15Schema(Django15Base):
    """Configuration schema for Django 1.5."""
    version = '1.5'

    class django(Django15Base.django):
        # CACHES now ships with a local-memory default cache.
        caches = DictOption(
            item=UpperCaseDictOption(
                spec={
                    'backend': StringOption(),
                    'location': StringOption()
                }),
            default={
                'default': {
                    'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
                }
            })
        # Custom/swappable user models are new in 1.5.
        auth_user_model = StringOption(
            default='auth.User',
            help="The model to use to represent a User.")
        session_cache_alias = StringOption(
            default='default',
            help="If you're using cache-based session storage, this selects "
            " the cache to use.")
        # The 1.4 list plus UnsaltedSHA1PasswordHasher.
        password_hashers = ListOption(
            item=StringOption(),
            help="This is a list of hashing algorithm classes that this "
            "Django installation supports. The first entry in this list "
            "(that is, settings.PASSWORD_HASHERS[0]) will be used to "
            "store passwords, and all the other entries are valid "
            "hashers that can be used to check existing passwords.",
            default=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
                'django.contrib.auth.hashers.BCryptPasswordHasher',
                'django.contrib.auth.hashers.SHA1PasswordHasher',
                'django.contrib.auth.hashers.MD5PasswordHasher',
                'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
                'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
                'django.contrib.auth.hashers.CryptPasswordHasher',
            ])
class DjangoSchemaFactory(object):
    """Registry mapping Django version strings to configglue schemas.

    Schemas can be registered explicitly via :meth:`register`; for versions
    without a registered schema, :meth:`build` creates one dynamically by
    introspecting Django's global settings and the project settings.

    NOTE: this module targets Python 2 (see the ``unicode`` entry in the
    type map below).
    """

    def __init__(self):
        # Maps version string -> schema class.
        self._schemas = {}

    def register(self, schema_cls, version=None):
        """Register *schema_cls* for *version*.

        If *version* is None, fall back to the ``version`` attribute of the
        schema class itself.

        Raises:
            ValueError: if no version was given and the class has none.
        """
        if version is None:
            # fall back to looking up the version on the schema class
            version = getattr(schema_cls, 'version', None)
        if version is None:
            raise ValueError(
                "No version was specified nor found in schema %r" % schema_cls)
        self._schemas[version] = schema_cls

    def get(self, version, strict=True):
        """Return the schema registered for *version*.

        When *strict* is True an unknown version raises ValueError;
        otherwise a schema is built (and registered) on the fly.
        """
        if version in self._schemas:
            return self._schemas[version]
        msg = "No schema registered for version %r" % version
        if strict:
            raise ValueError(msg)
        # logging.warn is a deprecated alias of logging.warning
        logging.warning(msg)
        logging.warning("Dynamically creating schema for version %r" % version)
        schema = self.build(version)
        return schema

    def build(self, version_string=None, options=None,
              BaseSchema=BaseDjangoSchema):
        """Create and register a schema for *version_string*.

        Unless *options* is given, option names/values are harvested from
        Django's ``global_settings`` overlaid with the project template
        settings. Options not already declared on the base schema are added
        with a type inferred from the default value.
        """
        if version_string is None:
            version_string = get_version()
        if options is None:
            project_settings = get_project_settings()
            # Settings are UPPER_CASE attributes; schema options are
            # lower-case.
            options = dict([(name.lower(), value) for (name, value) in
                inspect.getmembers(global_settings) if name.isupper()])
            project_options = dict([(name.lower(), value) for (name, value) in
                inspect.getmembers(project_settings) if name.isupper()])
            # handle special case of ROOT_URLCONF which depends on the
            # project name
            root_urlconf = project_options['root_urlconf'].replace(
                '{{ project_name }}.', '')
            project_options['root_urlconf'] = root_urlconf
            options.update(project_options)

        try:
            # Prefer the registered schema for the same major.minor series
            # as the base for the dynamic schema.
            base_version = '{0}.{1}'.format(*VERSION[:2])
            BaseSchema = self.get(base_version)
        except ValueError:
            pass
        section_base_class = getattr(BaseSchema, 'django', Section)

        class DjangoSchema(BaseSchema):
            version = version_string

            class django(section_base_class):
                pass

        def get_option_type(name, value):
            """Map a settings default value to a configglue option instance."""
            type_mappings = {
                int: IntOption,
                bool: BoolOption,
                list: ListOption,
                tuple: TupleOption,
                dict: DictOption,
                str: StringOption,
                unicode: StringOption,
            }
            if value is None:
                # Special casing strange value, which by default is None but
                # should be set to tuple.
                if name == 'secure_proxy_ssl_header':
                    return TupleOption(name=name, default=None)
                return StringOption(name=name, default=value, null=True)
            else:
                # Clean up values coming from the project template and having
                # {{ }} substitutions in them.
                if name in ('secret_key', 'wsgi_application'):
                    value = ''
                option_type = type_mappings[type(value)]
                kwargs = {'name': name, 'default': value}

                if option_type in (DictOption, ListOption):
                    # get inner item type
                    if option_type == DictOption:
                        # NOTE: relies on Python 2 where dict.values()
                        # returns an indexable list.
                        items = value.values()
                    else:
                        items = value

                    item_type = None
                    if items:
                        item_type = type_mappings.get(type(items[0]), None)
                        # make sure all items have a consistent type
                        for item in items:
                            current_item_type = type_mappings.get(
                                type(item), None)
                            if current_item_type != item_type:
                                item_type = None
                                # mismatching types found. fallback to default
                                # item type
                                break
                    if item_type is not None:
                        kwargs['item'] = item_type()
                return option_type(**kwargs)

        for name, value in options.items():
            if name == '__CONFIGGLUE_PARSER__':
                continue
            if not hasattr(DjangoSchema.django, name):
                option = get_option_type(name, value)
                setattr(DjangoSchema.django, name, option)

        # register schema for it to be available during next query
        self.register(DjangoSchema, version_string)
        return DjangoSchema
# Global schema registry, pre-populated with the statically defined schemas.
schemas = DjangoSchemaFactory()
schemas.register(BaseDjangoSchema)
schemas.register(Django13Schema)
schemas.register(Django136Schema)
# 1.3.7 introduced no settings changes, so it reuses the 1.3.6 schema.
schemas.register(Django136Schema, '1.3.7')
schemas.register(Django14Schema)
schemas.register(Django144Schema)
schemas.register(Django145Schema)
schemas.register(Django15Schema)
# 1.5.1 introduced no settings changes, so it reuses the 1.5 schema.
schemas.register(Django15Schema, '1.5.1')
|
miing/mci_migo_packages_django-configglue
|
django_configglue/schema.py
|
Python
|
lgpl-3.0
| 79,058
|
[
"VisIt"
] |
4325791ccf5f4c9bd2582abd25d35c5734903bf6a4ff23a5e86479ea0484f771
|
# -*- coding: utf-8 -*-
"""Tests for instantiating the manager"""
import os
import tempfile
import unittest
from pybel import Manager
from pybel.manager.base_manager import build_engine_session
try:
from unittest import mock
except ImportError:
import mock
class TestInstantiation(unittest.TestCase):
    """Tests for the various ways of constructing a :class:`Manager`."""

    def setUp(self):
        """Create a temporary SQLite database file and prepare a patch for
        the default connection getter, so every test can run against a
        known connection string without touching the real configuration."""
        self.fd, self.path = tempfile.mkstemp()
        self.connection = "sqlite:///" + self.path

        def _get_connection():
            """Return the connection string owned by this test case.

            :rtype: str
            """
            return self.connection

        self.mock_connection = mock.patch(
            "pybel.manager.cache_manager.get_cache_connection",
            _get_connection,
        )

    def tearDown(self):
        """Dispose of the temporary database file."""
        os.close(self.fd)
        os.remove(self.path)

    def _assert_connected(self, manager):
        """Check that *manager*'s engine is bound to this test's connection."""
        self.assertEqual(self.connection, str(manager.engine.url))

    def test_fail_connection_none(self):
        """A None connection must be rejected outright."""
        with self.assertRaises(ValueError):
            build_engine_session(None)

    def test_instantiate_init(self):
        """With no arguments, the (patched) default connection is used."""
        with self.mock_connection:
            self._assert_connected(Manager())

    def test_instantiate_manager_positional(self):
        """The connection can be passed positionally."""
        self._assert_connected(Manager(self.connection))

    def test_instantiate_manager_positional_with_keyword(self):
        """A positional connection combines with other keyword arguments."""
        self._assert_connected(Manager(self.connection, echo=False))

    def test_instantiate_manager_fail_positional(self):
        """A second bare positional argument is not accepted."""
        with self.assertRaises(ValueError):
            Manager(self.connection, True)

    def test_instantiate_manager_keyword(self):
        """The connection can be passed as a keyword argument."""
        self._assert_connected(Manager(connection=self.connection))

    def test_instantiate_manager_connection_fail_too_many_keyword(self):
        """connection= is mutually exclusive with engine=/session=."""
        with self.assertRaises(ValueError):
            Manager(connection=self.connection, engine="something", session="something")

    def test_instantiate_manager_engine_fail_too_many_keywords(self):
        """engine=/session= cannot be combined with other options."""
        with self.assertRaises(ValueError):
            Manager(engine="something", session="something", echo=False)

    def test_instantiate_manager_engine_missing(self):
        """Passing session= without an engine is an error."""
        with self.assertRaises(ValueError):
            Manager(engine=None, session="fake-session")

    def test_instantiate_manager_session_missing(self):
        """Passing engine= without a session is an error."""
        with self.assertRaises(ValueError):
            Manager(engine="fake-engine", session=None)
|
pybel/pybel
|
tests/test_manager/test_connection.py
|
Python
|
mit
| 2,903
|
[
"Pybel"
] |
8ae997eb3828300b34f518f26c791cd5dfb64452208c3e2cf6fa1f752550fba7
|
"""
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerable larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad)
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar(label="Log-marginal Likelihood")
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
|
fabianp/scikit-learn
|
examples/gaussian_process/plot_gpc.py
|
Python
|
bsd-3-clause
| 3,958
|
[
"Gaussian"
] |
a5efa02004d3072f037e9fb318649658f0697b760017ef38a125a3dbf39d89a1
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Index page (aka, Home page). Note that this is different than
what students see @ edx.org because we redirect requests to a separate web application.
"""
import datetime
from bok_choy.web_app_test import WebAppTest
from common.test.acceptance.pages.lms.index import IndexPage
class BaseLmsIndexTest(WebAppTest):
    """Shared fixture for LMS Index (Home) page test suites."""

    def setUp(self):
        """Build the page object and land on the index page."""
        # Let the parent class prepare browser/session state first.
        super(BaseLmsIndexTest, self).setUp()
        # Page object every derived suite interacts with.
        self.page = IndexPage(self.browser)
        # Every test starts from a freshly loaded index page.
        self.page.visit()
class LmsIndexPageTest(BaseLmsIndexTest):
    """Validation suite for the LMS Index (Home) page."""

    def setUp(self):
        super(LmsIndexPageTest, self).setUp()
        # Capture the current time once for any time-sensitive checks.
        self.now = datetime.datetime.now()

    def test_index_basic_request(self):
        """
        Perform a general validation of the index page, renders normally, no exceptions raised, etc.
        """
        # The banner must be rendered and on screen.
        self.assertTrue(self.page.banner_element.visible)
        footer_expected = [u'About', u'Blog', u'News', u'Help Center', u'Contact', u'Careers', u'Donate']
        self.assertEqual(self.page.footer_links, footer_expected)

    def test_intro_video_hidden_by_default(self):
        """
        Confirm that the intro video is not displayed when using the default configuration
        """
        # The introduction video element must not be shown on screen.
        self.assertFalse(self.page.intro_video_element.visible)
        # Still need to figure out how to swap platform settings in the context
        # of a bok choy test, but we can at least prevent accidental exposure
        # with these validations going forward.
        # Note: 'present' is a DOM check, whereas 'visible' is an actual browser/screen check.
        self.assertFalse(self.page.video_modal_element.present)
        self.assertFalse(self.page.video_modal_element.visible)
|
louyihua/edx-platform
|
common/test/acceptance/tests/lms/test_lms_index.py
|
Python
|
agpl-3.0
| 2,224
|
[
"VisIt"
] |
de958a9d9442ca7e15329ddde57f7d3d61c5ac2ee38c6c9a12172c93b558bee5
|
#!/usr/bin/env python
"""Adaoted from the "wall" demo client for Open Pixel Control
http://github.com/zestyping/openpixelcontrol
Creates a shifting rainbow plaid pattern by overlaying different sine waves
in the red, green, and blue channels.
To run:
First start the gl simulator using the included "wall" layout
make
bin/gl_server layouts/octopus.json
Then run this script in another shell to send colors to the simulator
python_clients/raver_plaid.py
"""
from __future__ import division
import time
import math
import sys
import color_utils
from pattern import Pattern
# I like these trippy settings:
# freq_r: 0
# freq_g: 60
# freq_b: 24
# speed_r 3.6
# speed_g: -0.6
# speed_b: 8
# how many seconds the color sine waves take to shift through a complete cycle
class RainbowPlaidPattern(Pattern):
    """Shifting rainbow plaid: three out-of-phase RGB cosine waves overlaid
    with slowly drifting diagonal black stripes."""

    def __init__(self, freq_r=12, freq_g=30, freq_b=24, speed_r=2, speed_g=-3, speed_b=4):
        # Register every frequency/speed as a tunable parameter; the allowed
        # range is derived from the supplied default (min, max, initial).
        self.register_param("freq_r", 0, freq_r*2, freq_r)
        self.register_param("freq_g", 0, freq_g*2, freq_g)
        self.register_param("freq_b", 0, freq_b*2, freq_b)
        self.register_param("speed_r", 0.1, speed_r*2, speed_r)
        self.register_param("speed_g", 0.1, speed_g*2, speed_g)
        self.register_param("speed_b", 0.1, speed_b*2, speed_b)
        # Reference time so the animation phase is relative to pattern start.
        self.start_time = time.time()

    def next_frame(self, octopus, data):
        elapsed = time.time() - self.start_time
        pixels = octopus.pixels()
        pixel_count = len(pixels)
        for idx, pixel in enumerate(pixels):
            position = idx / pixel_count
            # Diagonal black stripes: jitter the position, then gate brightness
            # with a cosine plus a slow 60s drift, clamped to [0, 1].
            jittered = (position * 77) % 37
            stripes = color_utils.cos(jittered, offset=elapsed*0.05, period=1, minn=-1.5, maxx=1.5)
            drift = color_utils.cos(elapsed, offset=0.9, period=60, minn=-0.5, maxx=3)
            stripes = color_utils.clamp(stripes + drift, 0, 1)
            # Three desynchronised cosine waves, one per colour channel,
            # remapped from [-1, 1] to [0, 256] and dimmed by the stripes.
            red = stripes * color_utils.remap(math.cos((elapsed/self.speed_r + position*self.freq_r)*math.pi*2), -1, 1, 0, 256)
            green = stripes * color_utils.remap(math.cos((elapsed/self.speed_g + position*self.freq_g)*math.pi*2), -1, 1, 0, 256)
            blue = stripes * color_utils.remap(math.cos((elapsed/self.speed_b + position*self.freq_b)*math.pi*2), -1, 1, 0, 256)
            pixel.color = (red, green, blue)
|
TheGentlemanOctopus/thegentlemanoctopus
|
octopus_code/core/octopus/patterns/rainbowPlaidPattern.py
|
Python
|
gpl-3.0
| 2,407
|
[
"Octopus"
] |
41f2c0aa60ba82ca48b4fbc26b99131241dfb4d0f3dca8bce7d4b99509be0f32
|
import json
import networkx as nx
from itertools import imap
from functools import partial
from collections import defaultdict
from math import sqrt
from datetime import datetime
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connection
from django.http import HttpResponse
from catmaid.models import UserRole, ClassInstance, Treenode, \
TreenodeClassInstance, ConnectorClassInstance, Review
from catmaid.control import export_NeuroML_Level3
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_relation_to_id_map
from catmaid.control.review import get_treenodes_to_reviews, \
get_treenodes_to_reviews_with_time
from tree_util import edge_count_to_root, partition
try:
from exportneuroml import neuroml_single_cell, neuroml_network
except ImportError:
print "NeuroML is not loading"
def get_treenodes_qs(project_id=None, skeleton_id=None, with_labels=True):
    """Build querysets for a skeleton's treenodes and, optionally, its labels.

    Returns a 3-tuple (treenodes, treenode labels, connector labels); the two
    label entries are empty lists when with_labels is False.
    """
    treenode_qs = Treenode.objects.filter(skeleton_id=skeleton_id)
    if not with_labels:
        # Callers that don't need labels get cheap empty placeholders.
        return treenode_qs, [], []
    labels_qs = TreenodeClassInstance.objects.filter(
        relation__relation_name='labeled_as',
        treenode__skeleton_id=skeleton_id).select_related('treenode', 'class_instance')
    labelconnector_qs = ConnectorClassInstance.objects.filter(
        relation__relation_name='labeled_as',
        connector__treenodeconnector__treenode__skeleton_id=skeleton_id).select_related('connector', 'class_instance')
    return treenode_qs, labels_qs, labelconnector_qs
def get_swc_string(treenodes_qs):
    """Serialize treenodes into an SWC document.

    Each output line is "<id> <type> <x> <y> <z> <radius> <parent>" followed
    by a newline. The structure type column is always 0, negative radii are
    clamped to 0, and a missing parent is encoded as -1.

    :param treenodes_qs: iterable of treenode-like objects exposing the
        attributes id, location_x, location_y, location_z, radius, parent_id.
    :return: the SWC text as a single string ("" for an empty input).
    """
    lines = []
    for tn in treenodes_qs:
        row = (tn.id,
               0,  # SWC structure identifier: undefined
               tn.location_x,
               tn.location_y,
               tn.location_z,
               max(tn.radius, 0),  # SWC has no notion of "unknown" negative radius
               -1 if tn.parent_id is None else tn.parent_id)
        lines.append(" ".join(map(str, row)))
    # Single join instead of repeated string concatenation (which is
    # quadratic in the number of rows).
    return "".join(line + "\n" for line in lines)
def export_skeleton_response(request, project_id=None, skeleton_id=None, format=None):
    """Return a skeleton export in the requested text format.

    format: 'swc' (plain text) or 'json'; any other value raises.
    NOTE(review): the 'json' branch calls get_json_string, which is not
    defined in this part of the module -- presumably defined elsewhere in
    the file; verify before relying on that branch.
    """
    treenode_qs, labels_qs, labelconnector_qs = get_treenodes_qs(project_id, skeleton_id)
    if format == 'swc':
        return HttpResponse(get_swc_string(treenode_qs), content_type='text/plain')
    elif format == 'json':
        return HttpResponse(get_json_string(treenode_qs), content_type='text/json')
    else:
        # Python 2 raise syntax; the message embeds the unsupported format.
        raise Exception, "Unknown format ('%s') in export_skeleton_response" % (format,)
@requires_user_role(UserRole.Browse)
def compact_skeleton(request, project_id=None, skeleton_id=None, with_connectors=None, with_tags=None):
    """
    Performance-critical function. Do not edit unless to improve performance.
    Returns, in JSON, [[nodes], [connectors], {nodeID: [tags]}], with connectors and tags being empty when 0 == with_connectors and 0 == with_tags, respectively
    """
    # Sanitize: every value interpolated into SQL below is cast to int first,
    # which is what makes the %-interpolation safe against injection.
    project_id = int(project_id)
    skeleton_id = int(skeleton_id)
    with_connectors = int(with_connectors)
    with_tags = int(with_tags)
    cursor = connection.cursor()
    # All treenode rows of the skeleton: (id, parent, user, x, y, z, radius, confidence)
    cursor.execute('''
        SELECT id, parent_id, user_id,
            location_x, location_y, location_z,
            radius, confidence
        FROM treenode
        WHERE skeleton_id = %s
    ''' % skeleton_id)
    nodes = tuple(cursor.fetchall())
    if 0 == len(nodes):
        # Check if the skeleton exists
        if 0 == ClassInstance.objects.filter(pk=skeleton_id).count():
            raise Exception("Skeleton #%s doesn't exist" % skeleton_id)
        # Otherwise returns an empty list of nodes
    connectors = ()
    tags = defaultdict(list)
    if 0 != with_connectors or 0 != with_tags:
        # postgres is caching this query
        cursor.execute("SELECT relation_name, id FROM relation WHERE project_id=%s" % project_id)
        # Map of relation_name -> relation id for this project.
        relations = dict(cursor.fetchall())
        if 0 != with_connectors:
            # Fetch all connectors with their partner treenode IDs
            pre = relations['presynaptic_to']
            post = relations['postsynaptic_to']
            cursor.execute('''
                SELECT tc.treenode_id, tc.connector_id, tc.relation_id,
                    c.location_x, c.location_y, c.location_z
                FROM treenode_connector tc,
                    connector c
                WHERE tc.skeleton_id = %s
                AND tc.connector_id = c.id
                AND (tc.relation_id = %s OR tc.relation_id = %s)
            ''' % (skeleton_id, pre, post))
            # Encode the relation compactly: 1 for postsynaptic, 0 for presynaptic.
            connectors = tuple((row[0], row[1], 1 if row[2] == post else 0, row[3], row[4], row[5]) for row in cursor.fetchall())
        if 0 != with_tags:
            # Fetch all node tags
            cursor.execute('''
                SELECT c.name, tci.treenode_id
                FROM treenode t,
                    treenode_class_instance tci,
                    class_instance c
                WHERE t.skeleton_id = %s
                AND t.id = tci.treenode_id
                AND tci.relation_id = %s
                AND c.id = tci.class_instance_id
            ''' % (skeleton_id, relations['labeled_as']))
            # Group node IDs under each tag name.
            for row in cursor.fetchall():
                tags[row[0]].append(row[1])
    return HttpResponse(json.dumps((nodes, connectors, tags), separators=(',', ':')))
@requires_user_role(UserRole.Browse)
def compact_arbor(request, project_id=None, skeleton_id=None, with_nodes=None, with_connectors=None, with_tags=None):
    """
    Performance-critical function. Do not edit unless to improve performance.
    Returns, in JSON, [[nodes], [outputs], [inputs], {nodeID: [tags]}],
    with inputs and outputs being empty when 0 == with_connectors,
    and the dict of node tags being empty 0 == with_tags, respectively.
    The difference between this function and the compact_skeleton function is that
    the connectors contain the whole chain from the skeleton of interest to the
    partner skeleton:
    [treenode_id, confidence,
     connector_id,
     confidence, treenode_id, skeleton_id,
     relation_id, relation_id]
    where the first 2 values are from the given skeleton_id,
    then the connector_id,
    then the next 3 values are from the partner skeleton,
    and finally the two relations: first for the given skeleton_id and then for the other skeleton.
    The relation_id is 0 for pre and 1 for post.
    """
    # Sanitize: all values interpolated into SQL are cast to int first.
    project_id = int(project_id)
    skeleton_id = int(skeleton_id)
    with_nodes = int(with_nodes)
    with_connectors = int(with_connectors)
    with_tags = int(with_tags)
    cursor = connection.cursor()
    nodes = ()
    connectors = []
    tags = defaultdict(list)
    if 0 != with_nodes:
        cursor.execute('''
            SELECT id, parent_id, user_id,
                location_x, location_y, location_z,
                radius, confidence
            FROM treenode
            WHERE skeleton_id = %s
        ''' % skeleton_id)
        nodes = tuple(cursor.fetchall())
        if 0 == len(nodes):
            # Check if the skeleton exists
            if 0 == ClassInstance.objects.filter(pk=skeleton_id).count():
                raise Exception("Skeleton #%s doesn't exist" % skeleton_id)
            # Otherwise returns an empty list of nodes
    if 0 != with_connectors or 0 != with_tags:
        # postgres is caching this query
        cursor.execute("SELECT relation_name, id FROM relation WHERE project_id=%s" % project_id)
        # Map of relation_name -> relation id for this project.
        relations = dict(cursor.fetchall())
        if 0 != with_connectors:
            # Fetch all inputs and outputs: join each treenode_connector row of
            # this skeleton with every other row sharing the same connector.
            pre = relations['presynaptic_to']
            post = relations['postsynaptic_to']
            cursor.execute('''
                SELECT tc1.treenode_id, tc1.confidence,
                    tc1.connector_id,
                    tc2.confidence, tc2.treenode_id, tc2.skeleton_id,
                    tc1.relation_id, tc2.relation_id
                FROM treenode_connector tc1,
                    treenode_connector tc2
                WHERE tc1.skeleton_id = %s
                AND tc1.id != tc2.id
                AND tc1.connector_id = tc2.connector_id
                AND (tc1.relation_id = %s OR tc1.relation_id = %s)
            ''' % (skeleton_id, pre, post))
            for row in cursor.fetchall():
                # Ignore all other kinds of relation pairs (there shouldn't be any)
                # Last two fields: 0 for presynaptic, 1 for postsynaptic.
                if row[6] == pre and row[7] == post:
                    connectors.append((row[0], row[1], row[2], row[3], row[4], row[5], 0, 1))
                elif row[6] == post and row[7] == pre:
                    connectors.append((row[0], row[1], row[2], row[3], row[4], row[5], 1, 0))
        if 0 != with_tags:
            # Fetch all node tags
            cursor.execute('''
                SELECT c.name, tci.treenode_id
                FROM treenode t,
                    treenode_class_instance tci,
                    class_instance c
                WHERE t.skeleton_id = %s
                AND t.id = tci.treenode_id
                AND tci.relation_id = %s
                AND c.id = tci.class_instance_id
            ''' % (skeleton_id, relations['labeled_as']))
            # Group node IDs under each tag name.
            for row in cursor.fetchall():
                tags[row[0]].append(row[1])
    return HttpResponse(json.dumps((nodes, connectors, tags), separators=(',', ':')))
@requires_user_role([UserRole.Browse])
def treenode_time_bins(request, project_id=None, skeleton_id=None):
    """ Return a map of time bins (minutes) vs. list of nodes. """
    bins = defaultdict(list)
    epoch = datetime.utcfromtimestamp(0)
    rows = Treenode.objects.filter(skeleton_id=int(skeleton_id)).values_list('id', 'creation_time')
    for node_id, created in rows:
        # Bin index: whole minutes since the Unix epoch.
        minute = int((created - epoch).total_seconds() / 60)
        bins[minute].append(node_id)
    return HttpResponse(json.dumps(bins, separators=(',', ':')))
@requires_user_role([UserRole.Browse])
def compact_arbor_with_minutes(request, project_id=None, skeleton_id=None, with_nodes=None, with_connectors=None, with_tags=None):
    """Like compact_arbor, but appends the per-minute creation bins to the JSON payload."""
    arbor_response = compact_arbor(request, project_id=project_id, skeleton_id=skeleton_id,
                                   with_nodes=with_nodes, with_connectors=with_connectors,
                                   with_tags=with_tags)
    minutes_response = treenode_time_bins(request, project_id=project_id, skeleton_id=skeleton_id)
    # Splice the minutes JSON in by replacing the arbor JSON's closing bracket.
    arbor_response.content = "%s, %s]" % (arbor_response.content[:-1], minutes_response.content)
    return arbor_response
# DEPRECATED. Will be removed.
def _skeleton_for_3d_viewer(skeleton_id, project_id, with_connectors=True, lean=0, all_field=False):
    """ with_connectors: when False, connectors are not returned
    lean: when not zero, both connectors and tags are returned as empty arrays.

    Returns a 5-tuple (neuron name, node rows, {tag name: [node IDs]},
    connector rows, reviews).
    """
    skeleton_id = int(skeleton_id) # sanitize
    cursor = connection.cursor()
    # Fetch the neuron name
    cursor.execute(
        '''SELECT name
        FROM class_instance ci,
            class_instance_class_instance cici
        WHERE cici.class_instance_a = %s
        AND cici.class_instance_b = ci.id
        ''' % skeleton_id)
    row = cursor.fetchone()
    if not row:
        # Check that the skeleton exists
        cursor.execute('''SELECT id FROM class_instance WHERE id=%s''' % skeleton_id)
        if not cursor.fetchone():
            raise Exception("Skeleton #%s doesn't exist!" % skeleton_id)
        else:
            raise Exception("No neuron found for skeleton #%s" % skeleton_id)
    name = row[0]
    if all_field:
        added_fields = ', creation_time, edition_time'
    else:
        added_fields = ''
    # Fetch all nodes, with their tags if any
    cursor.execute(
        '''SELECT id, parent_id, user_id, location_x, location_y, location_z, radius, confidence %s
          FROM treenode
          WHERE skeleton_id = %s
        ''' % (added_fields, skeleton_id) )
    # array of properties: id, parent_id, user_id, x, y, z, radius, confidence
    nodes = tuple(cursor.fetchall())
    # Fix of an earlier misleading comment: this maps tag name -> list of node
    # IDs (see the append below, keyed on the class_instance name).
    tags = defaultdict(list)
    connectors = []
    # Get all reviews for this skeleton
    if all_field:
        reviews = get_treenodes_to_reviews_with_time(skeleton_ids=[skeleton_id])
    else:
        reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])
    if 0 == lean: # meaning not lean
        # Text tags
        cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
        labeled_as = cursor.fetchall()[0][0]
        cursor.execute(
            ''' SELECT treenode_class_instance.treenode_id, class_instance.name
            FROM treenode, class_instance, treenode_class_instance
            WHERE treenode.skeleton_id = %s
            AND treenode.id = treenode_class_instance.treenode_id
            AND treenode_class_instance.class_instance_id = class_instance.id
            AND treenode_class_instance.relation_id = %s
            ''' % (skeleton_id, labeled_as))
        for row in cursor.fetchall():
            tags[row[1]].append(row[0])
        if with_connectors:
            if all_field:
                added_fields = ', c.creation_time'
            else:
                added_fields = ''
            # Fetch all connectors with their partner treenode IDs
            cursor.execute(
                ''' SELECT tc.treenode_id, tc.connector_id, r.relation_name,
                    c.location_x, c.location_y, c.location_z %s
                FROM treenode_connector tc,
                    connector c,
                    relation r
                WHERE tc.skeleton_id = %s
                AND tc.connector_id = c.id
                AND tc.relation_id = r.id
                ''' % (added_fields, skeleton_id) )
            # Above, purposefully ignoring connector tags. Would require a left outer join on the inner join of connector_class_instance and class_instance, and frankly connector tags are pointless in the 3d viewer.
            # List of (treenode_id, connector_id, relation_id, x, y, z)n with relation_id replaced by 0 (presynaptic) or 1 (postsynaptic)
            # 'presynaptic_to' has an 'r' at position 1:
            for row in cursor.fetchall():
                x, y, z = imap(float, (row[3], row[4], row[5]))
                connectors.append((row[0], row[1], 0 if 'r' == row[2][1] else 1, x, y, z, row[6]))
    # Single exit point; the original had two consecutive identical return
    # statements, one of which was redundant.
    return name, nodes, tags, connectors, reviews
# DEPRECATED. Will be removed.
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_for_3d_viewer(request, project_id=None, skeleton_id=None):
    # Thin JSON endpoint around _skeleton_for_3d_viewer.
    # NOTE(review): POST values arrive as strings, so any posted
    # 'with_connectors' / 'all_fields' value (even 'false') is truthy; only
    # the key's absence yields the default -- confirm callers rely on that.
    return HttpResponse(json.dumps(_skeleton_for_3d_viewer(skeleton_id, project_id, with_connectors=request.POST.get('with_connectors', True), lean=int(request.POST.get('lean', 0)), all_field=request.POST.get('all_fields', False)), separators=(',', ':')))
# DEPRECATED. Will be removed.
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_with_metadata(request, project_id=None, skeleton_id=None):
    """Export the full 3d-viewer tuple (all fields, with connectors), with
    datetime values serialized as milliseconds since the Unix epoch."""
    def default(obj):
        """Default JSON serializer."""
        # Local import: the module-level name 'datetime' is the class, not the module.
        import calendar, datetime
        if isinstance(obj, datetime.datetime):
            # Normalize aware datetimes to UTC before converting.
            if obj.utcoffset() is not None:
                obj = obj - obj.utcoffset()
            millis = int(
                calendar.timegm(obj.timetuple()) * 1000 +
                obj.microsecond / 1000
            )
        # NOTE(review): for non-datetime objects 'millis' is unbound here --
        # presumably only datetimes ever reach this serializer; verify.
        return millis
    return HttpResponse(json.dumps(_skeleton_for_3d_viewer(skeleton_id, project_id, \
            with_connectors=True, lean=0, all_field=True), separators=(',', ':'), default=default))
def _measure_skeletons(skeleton_ids):
    """Compute per-skeleton morphology measurements.

    Returns a dict of skeleton ID -> Skeleton instance carrying raw cable
    length, smoothed cable length, principal-branch cable, end/branch node
    counts and synapse counts (n_pre/n_post).
    """
    if not skeleton_ids:
        raise Exception("Must provide the ID of at least one skeleton.")
    skids_string = ",".join(map(str, skeleton_ids))
    cursor = connection.cursor()
    cursor.execute('''
    SELECT id, parent_id, skeleton_id, location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skids_string)
    # TODO should be all done with numpy,
    # TODO by partitioning the skeleton into sequences of x,y,z representing the slabs
    # TODO and then convolving them.
    class Skeleton():
        # Accumulator for one skeleton's measurements.
        def __init__(self):
            self.nodes = {}
            self.raw_cable = 0
            self.smooth_cable = 0
            self.principal_branch_cable = 0
            self.n_ends = 0
            self.n_branch = 0
            self.n_pre = 0
            self.n_post = 0
    class Node():
        # One treenode; w* hold the smoothed (weighted) position.
        def __init__(self, parent_id, x, y, z):
            self.parent_id = parent_id
            self.x = x
            self.y = y
            self.z = z
            self.wx = x # weighted average of itself and neighbors
            self.wy = y
            self.wz = z
            self.children = {} # node ID vs distance
    skeletons = defaultdict(dict) # skeleton ID vs (node ID vs Node)
    for row in cursor.fetchall():
        skeleton = skeletons.get(row[2])
        if not skeleton:
            skeleton = Skeleton()
            skeletons[row[2]] = skeleton
        skeleton.nodes[row[0]] = Node(row[1], row[3], row[4], row[5])
    for skeleton in skeletons.itervalues():
        nodes = skeleton.nodes
        tree = nx.DiGraph()
        root = None
        # Accumulate children
        for nodeID, node in nodes.iteritems():
            if not node.parent_id:
                root = nodeID
                continue
            tree.add_edge(node.parent_id, nodeID)
            parent = nodes[node.parent_id]
            # Euclidean distance from parent to this node.
            distance = sqrt( pow(node.x - parent.x, 2)
                           + pow(node.y - parent.y, 2)
                           + pow(node.z - parent.z, 2))
            parent.children[nodeID] = distance
            # Measure raw cable, given that we have the parent already
            skeleton.raw_cable += distance
        # Utilize accumulated children and the distances to them
        for nodeID, node in nodes.iteritems():
            # Count end nodes and branch nodes
            n_children = len(node.children)
            if not node.parent_id:
                # Root: one child makes it topologically an end node; more
                # than two makes it a branch.
                if 1 == n_children:
                    skeleton.n_ends += 1
                    continue
                if n_children > 2:
                    skeleton.n_branch += 1
                    continue
                # Else, if 2 == n_children, the root node is in the middle of the skeleton, being a slab node
            elif 0 == n_children:
                skeleton.n_ends += 1
                continue
            elif n_children > 1:
                skeleton.n_branch += 1
                continue
            # Compute weighted position for slab nodes only
            # (root, branch and end nodes do not move)
            oids = node.children.copy()
            if node.parent_id:
                oids[node.parent_id] = skeleton.nodes[node.parent_id].children[nodeID]
            sum_distances = sum(oids.itervalues())
            wx, wy, wz = 0, 0, 0
            for oid, distance in oids.iteritems():
                other = skeleton.nodes[oid]
                w = distance / sum_distances if sum_distances != 0 else 0
                wx += other.x * w
                wy += other.y * w
                wz += other.z * w
            # Blend 40% of the node's own position with 60% of the
            # distance-weighted neighbor average.
            node.wx = node.x * 0.4 + wx * 0.6
            node.wy = node.y * 0.4 + wy * 0.6
            node.wz = node.z * 0.4 + wz * 0.6
        # Find out nodes that belong to the principal branch
        principal_branch_nodes = set(sorted(partition(tree, root), key=len)[-1])
        # Compute smoothed cable length, also for principal branch
        for nodeID, node in nodes.iteritems():
            if not node.parent_id:
                # root node
                continue
            parent = nodes[node.parent_id]
            length = sqrt( pow(node.wx - parent.wx, 2)
                         + pow(node.wy - parent.wy, 2)
                         + pow(node.wz - parent.wz, 2))
            skeleton.smooth_cable += length
            if nodeID in principal_branch_nodes:
                skeleton.principal_branch_cable += length
    # Count inputs
    cursor.execute('''
    SELECT tc.skeleton_id, count(tc.skeleton_id)
    FROM treenode_connector tc,
         relation r
    WHERE tc.skeleton_id IN (%s)
      AND tc.relation_id = r.id
      AND r.relation_name = 'postsynaptic_to'
    GROUP BY tc.skeleton_id
    ''' % skids_string)
    for row in cursor.fetchall():
        skeletons[row[0]].n_pre = row[1]
    # Count outputs
    cursor.execute('''
    SELECT tc1.skeleton_id, count(tc1.skeleton_id)
    FROM treenode_connector tc1,
         treenode_connector tc2,
         relation r1,
         relation r2
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = r1.id
      AND r1.relation_name = 'presynaptic_to'
      AND tc2.relation_id = r2.id
      AND r2.relation_name = 'postsynaptic_to'
    GROUP BY tc1.skeleton_id
    ''' % skids_string)
    for row in cursor.fetchall():
        skeletons[row[0]].n_post = row[1]
    return skeletons
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def measure_skeletons(request, project_id=None):
    """Measure the POSTed skeletons and return one summary row per skeleton."""
    skeleton_ids = tuple(int(v) for k, v in request.POST.iteritems() if k.startswith('skeleton_ids['))
    # One row per skeleton: id, raw cable, smooth cable, inputs, outputs,
    # node count, branch count, end count, principal branch cable.
    rows = [(skid, int(sk.raw_cable), int(sk.smooth_cable), sk.n_pre, sk.n_post,
             len(sk.nodes), sk.n_branch, sk.n_ends, sk.principal_branch_cable)
            for skid, sk in _measure_skeletons(skeleton_ids).iteritems()]
    return HttpResponse(json.dumps(rows))
def _skeleton_neuroml_cell(skeleton_id, preID, postID):
    """Assemble the NeuroML Cell representation of a single skeleton."""
    skeleton_id = int(skeleton_id) # sanitize
    cursor = connection.cursor()
    cursor.execute('''
    SELECT id, parent_id, location_x, location_y, location_z, radius
    FROM treenode
    WHERE skeleton_id = %s
    ''' % skeleton_id)
    # node ID -> (parent ID, (x, y, z), radius)
    nodes = {}
    for node_id, parent_id, x, y, z, radius in cursor.fetchall():
        nodes[node_id] = (parent_id, (x, y, z), radius)
    cursor.execute('''
    SELECT tc.treenode_id, tc.connector_id, tc.relation_id
    FROM treenode_connector tc
    WHERE tc.skeleton_id = %s
      AND (tc.relation_id = %s OR tc.relation_id = %s)
    ''' % (skeleton_id, preID, postID))
    pre = defaultdict(list) # treenode ID vs list of connector ID
    post = defaultdict(list)
    for treenode_id, connector_id, relation_id in cursor.fetchall():
        # Sort each synaptic site into the pre- or postsynaptic bucket.
        bucket = pre if relation_id == preID else post
        bucket[treenode_id].append(connector_id)
    return neuroml_single_cell(skeleton_id, nodes, pre, post)
@requires_user_role(UserRole.Browse)
def skeletons_neuroml(request, project_id=None):
    """ Export a list of skeletons each as a Cell in NeuroML. """
    project_id = int(project_id) # sanitize
    skeleton_ids = tuple(int(v) for k, v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    preID = relations['presynaptic_to']
    postID = relations['postsynaptic_to']
    # TODO could certainly fetch all nodes and synapses in one single query and then split them up.
    # Build cells lazily; neuroml_network consumes the generator while writing.
    cells = (_skeleton_neuroml_cell(skid, preID, postID) for skid in skeleton_ids)
    response = HttpResponse(content_type='text/txt')
    response['Content-Disposition'] = 'attachment; filename="data.neuroml"'
    neuroml_network(cells, response)
    return response
@requires_user_role(UserRole.Browse)
def export_neuroml_level3_v181(request, project_id=None):
    """Export the NeuroML Level 3 version 1.8.1 representation of one or more skeletons.
    Considers synapses among the requested skeletons only.

    mode 0: mutual export of all requested skeletons and their interconnections.
    mode 1: single skeleton with all its inputs.
    mode 2: single skeleton with inputs restricted to the given input skeletons.
    """
    skeleton_ids = tuple(int(v) for v in request.POST.getlist('skids[]'))
    mode = int(request.POST.get('mode'))
    skeleton_strings = ",".join(map(str, skeleton_ids))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    presynaptic_to = relations['presynaptic_to']
    postsynaptic_to = relations['postsynaptic_to']
    # Neuron names: each skeleton is a 'model_of' its neuron class instance.
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance_class_instance cici,
         class_instance ci,
         relation r
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = r.id
      AND r.relation_name = 'model_of'
    ''' % skeleton_strings)
    neuron_names = dict(cursor.fetchall())
    # Shared node query, executed last so each branch can first gather its
    # synapse data with the same cursor.
    skeleton_query = '''
        SELECT id, parent_id, location_x, location_y, location_z,
               radius, skeleton_id
        FROM treenode
        WHERE skeleton_id IN (%s)
        ORDER BY skeleton_id
        ''' % skeleton_strings
    if 0 == mode:
        cursor.execute('''
        SELECT treenode_id, connector_id, relation_id, skeleton_id
        FROM treenode_connector
        WHERE skeleton_id IN (%s)
          AND (relation_id = %s OR relation_id = %s)
        ''' % (skeleton_strings, presynaptic_to, postsynaptic_to))
        # Dictionary of connector ID vs map of relation_id vs list of treenode IDs
        connectors = defaultdict(partial(defaultdict, list))
        for row in cursor.fetchall():
            connectors[row[1]][row[2]].append((row[0], row[3]))
        # Dictionary of presynaptic skeleton ID vs map of postsynaptic skeleton ID vs list of tuples with presynaptic treenode ID and postsynaptic treenode ID.
        connections = defaultdict(partial(defaultdict, list))
        for connectorID, m in connectors.iteritems():
            for pre_treenodeID, skID1 in m[presynaptic_to]:
                for post_treenodeID, skID2 in m[postsynaptic_to]:
                    connections[skID1][skID2].append((pre_treenodeID, post_treenodeID))
        cursor.execute(skeleton_query)
        generator = export_NeuroML_Level3.exportMutual(neuron_names, cursor.fetchall(), connections)
    else:
        if len(skeleton_ids) > 1:
            raise Exception("Expected a single skeleton for mode %s!" % mode)
        input_ids = tuple(int(v) for v in request.POST.getlist('inputs[]', []))
        input_strings = ",".join(map(str, input_ids))
        if 2 == mode:
            # Restrict inputs to the explicitly requested partner skeletons.
            constraint = "AND tc2.skeleton_id IN (%s)" % input_strings
        elif 1 == mode:
            constraint = ""
        else:
            raise Exception("Unknown mode %s" % mode)
        # tc1 is the postsynaptic site on the requested skeleton, tc2 the
        # presynaptic site on the partner.
        cursor.execute('''
        SELECT tc2.skeleton_id, tc1.treenode_id
        FROM treenode_connector tc1,
             treenode_connector tc2
        WHERE tc1.skeleton_id = %s
          AND tc1.connector_id = tc2.connector_id
          AND tc1.treenode_id != tc2.treenode_id
          AND tc1.relation_id = %s
          AND tc2.relation_id = %s
          %s
        ''' % (skeleton_strings, postsynaptic_to, presynaptic_to, constraint))
        # Dictionary of skeleton ID vs list of treenode IDs at which the neuron receives inputs
        inputs = defaultdict(list)
        for row in cursor.fetchall():
            inputs[row[0]].append(row[1])
        cursor.execute(skeleton_query)
        generator = export_NeuroML_Level3.exportSingle(neuron_names, cursor.fetchall(), inputs)
    response = HttpResponse(generator, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=neuronal-circuit.neuroml'
    return response
@requires_user_role(UserRole.Browse)
def skeleton_swc(*args, **kwargs):
    """Shortcut for export_skeleton_response with the SWC format pre-selected."""
    kwargs = dict(kwargs, format='swc')
    return export_skeleton_response(*args, **kwargs)
def _export_review_skeleton(project_id=None, skeleton_id=None, format=None,
                            subarbor_node_id=None):
    """ Returns a list of segments for the requested skeleton. Each segment
    contains information about the review status of this part of the skeleton.
    If a valid subarbor_node_id is given, only data for the sub-arbor is
    returned that starts at this node.
    """
    # Get all treenodes of the requested skeleton
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id).values_list(
        'id', 'parent_id', 'location_x', 'location_y', 'location_z')
    # Get all reviews for the requested skeleton
    reviews = get_treenodes_to_reviews_with_time(skeleton_ids=[skeleton_id])
    # Add each treenode to a networkx graph and attach reviewer information to
    # it.
    # NOTE(review): add_node with a positional attribute dict is networkx 1.x
    # API -- confirm the pinned networkx version before upgrading.
    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        # While at it, send the reviewer IDs, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0], 'x': t[2], 'y': t[3], 'z': t[4], 'rids': reviews[t[0]]})
        if reviews[t[0]]:
            reviewed.add(t[0])
        if t[1]: # if parent
            g.add_edge(t[1], t[0]) # edge from parent to child
        else:
            root_id = t[0]
    if subarbor_node_id and subarbor_node_id != root_id:
        # Make sure the subarbor node ID (if any) is part of this skeleton
        if subarbor_node_id not in g:
            raise ValueError("Supplied subarbor node ID (%s) is not part of "
                             "provided skeleton (%s)" % (subarbor_node_id, skeleton_id))
        # Remove connection to parent
        parent = g.predecessors(subarbor_node_id)[0]
        g.remove_edge(parent, subarbor_node_id)
        # Remove all nodes that are upstream from the subarbor node
        to_delete = set()
        to_lookat = [root_id]
        while to_lookat:
            n = to_lookat.pop()
            to_lookat.extend(g.successors(n))
            to_delete.add(n)
        g.remove_nodes_from(to_delete)
        # Replace root id with sub-arbor ID
        root_id=subarbor_node_id
    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id) # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        # Walk toward the root; stop one node past the first already-seen
        # node so adjacent segments share their junction node.
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)
        if len(sequence) > 1:
            sequences.append(sequence)
    # Calculate status
    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            # Percentage of nodes in this segment that have at least one review.
            'status': '%.2f' % (100.0 * sum(1 for node in sequence if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
@requires_user_role(UserRole.Browse)
def export_review_skeleton(request, project_id=None, skeleton_id=None, format=None):
    """
    Export the skeleton as a list of sequences of entries, each entry containing
    an id, a sequence of nodes, the percent of reviewed nodes, and the node count.
    """
    raw_subarbor_id = request.POST.get('subarbor_node_id', '')
    try:
        subarbor_node_id = int(raw_subarbor_id)
    except ValueError:
        # Absent or non-numeric: export the whole skeleton.
        subarbor_node_id = None
    segments = _export_review_skeleton(project_id, skeleton_id, format,
                                       subarbor_node_id)
    return HttpResponse(json.dumps(segments, cls=DjangoJSONEncoder),
                        content_type='text/json')
@requires_user_role(UserRole.Browse)
def skeleton_connectors_by_partner(request, project_id):
    """ Return a dict of requested skeleton vs relation vs partner skeleton vs list of connectors.
    Connectors lacking a skeleton partner will of course not be included. """
    skeleton_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']
    # 'relations' maps relation_name -> id (see the two lookups above), so
    # indexing it with a relation_id from the query result would raise
    # KeyError. Build the inverse map once for keying the response.
    relation_names = {rel_id: rel_name for rel_name, rel_id in relations.iteritems()}
    cursor.execute('''
    SELECT tc1.skeleton_id, tc1.relation_id,
           tc2.skeleton_id, tc1.connector_id
    FROM treenode_connector tc1,
         treenode_connector tc2
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id != tc2.skeleton_id
      AND tc1.relation_id != tc2.relation_id
      AND (tc1.relation_id = %s OR tc1.relation_id = %s)
      AND (tc2.relation_id = %s OR tc2.relation_id = %s)
    ''' % (','.join(map(str, skeleton_ids)), pre, post, pre, post))
    # Dict of skeleton vs relation name vs partner skeleton vs list of connectors
    partners = defaultdict(partial(defaultdict, partial(defaultdict, list)))
    for row in cursor.fetchall():
        partners[row[0]][relation_names[row[1]]][row[2]].append(row[3])
    return HttpResponse(json.dumps(partners))
@requires_user_role(UserRole.Browse)
def export_skeleton_reviews(request, project_id=None, skeleton_id=None):
    """ Return a map of treenode ID vs list of reviewer IDs,
    without including any unreviewed treenode. """
    reviews_map = defaultdict(list)
    review_rows = Review.objects.filter(skeleton_id=int(skeleton_id)) \
        .values_list('treenode_id', 'reviewer_id', 'review_time')
    for treenode_id, reviewer_id, review_time in review_rows.iterator():
        reviews_map[treenode_id].append((reviewer_id, review_time))
    return HttpResponse(json.dumps(reviews_map, separators=(',', ':'), cls=DjangoJSONEncoder))
@requires_user_role(UserRole.Browse)
def within_spatial_distance(request, project_id=None):
    """ Find skeletons within a given Euclidean distance of a treenode.

    POST parameters: 'treenode' (required), 'distance' (0 disables the
    search), 'size_mode' (0: only skeletons with more than one node,
    1: only single-node skeletons, other: no size constraint).
    Returns at most 100 skeleton IDs plus a flag telling whether that
    limit was reached.
    """
    project_id = int(project_id)
    tnid = request.POST.get('treenode', None)
    if not tnid:
        raise Exception("Need a treenode!")
    tnid = int(tnid)
    distance = int(request.POST.get('distance', 0))
    if 0 == distance:
        return HttpResponse(json.dumps({"skeletons": []}))
    size_mode = int(request.POST.get("size_mode", 0))
    having = ""
    if 0 == size_mode:
        having = "HAVING count(*) > 1"
    elif 1 == size_mode:
        having = "HAVING count(*) = 1"
    # else, no constraint
    cursor = connection.cursor()
    cursor.execute('SELECT location_x, location_y, location_z FROM treenode WHERE id=%s' % tnid)
    pos = cursor.fetchone()
    limit = 100
    # Axis-aligned bounding box around the reference treenode.
    x0 = pos[0] - distance
    x1 = pos[0] + distance
    y0 = pos[1] - distance
    y1 = pos[1] + distance
    z0 = pos[2] - distance
    z1 = pos[2] + distance
    # Cheap emulation of the distance: a cube test rather than a true
    # Euclidean sphere, so corner hits up to distance*sqrt(3) away pass.
    cursor.execute('''
    SELECT skeleton_id, count(*)
    FROM treenode
    WHERE project_id = %s
      AND location_x > %s
      AND location_x < %s
      AND location_y > %s
      AND location_y < %s
      AND location_z > %s
      AND location_z < %s
    GROUP BY skeleton_id
    %s
    LIMIT %s
    ''' % (project_id, x0, x1, y0, y1, z0, z1, having, limit))
    skeletons = tuple(row[0] for row in cursor.fetchall())
    return HttpResponse(json.dumps({"skeletons": skeletons,
                                    "reached_limit": 100 == len(skeletons)}))
@requires_user_role(UserRole.Browse)
def partners_by_connector(request, project_id=None):
    """ Return a list of skeleton IDs related to the given list of connector IDs of the given skeleton ID.
    Will optionally filter for only presynaptic (relation=0) or only postsynaptic (relation=1). """
    skid = request.POST.get('skid', None)
    if not skid:
        raise Exception("Need a reference skeleton ID!")
    skid = int(skid)
    # Connector IDs arrive as 'connectors[n]' POST keys; int() validates
    # them, making the interpolation into the query below safe.
    # NOTE(review): an empty connectors tuple yields invalid SQL "IN ()" -
    # confirm clients always send at least one connector.
    connectors = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('connectors['))
    rel_type = int(request.POST.get("relation", 0))
    size_mode = int(request.POST.get("size_mode", 0))
    # Partner skeletons: treenodes of other skeletons (tc2) wired to the
    # same connectors as the reference skeleton (tc1), with the opposite
    # relation.
    query = '''
    SELECT DISTINCT tc2.skeleton_id
    FROM treenode_connector tc1,
         treenode_connector tc2
    WHERE tc1.project_id = %s
      AND tc1.skeleton_id = %s
      AND tc1.connector_id = tc2.connector_id
      AND tc1.skeleton_id != tc2.skeleton_id
      AND tc1.relation_id != tc2.relation_id
      AND tc1.connector_id IN (%s)
    ''' % (project_id, skid, ",".join(str(x) for x in connectors))
    # Constrain the relation of the second part
    # NOTE(review): rel_type=1 constrains the *partner* (tc2) to
    # presynaptic_to, i.e. the reference skeleton is the postsynaptic side;
    # verify this matches the docstring's intent.
    if 0 == rel_type or 1 == rel_type:
        query += "AND tc2.relation_id = (SELECT id FROM relation WHERE project_id = %s AND relation_name = '%s')" % (project_id, 'presynaptic_to' if 1 == rel_type else 'postsynaptic_to')
    cursor = connection.cursor()
    cursor.execute(query)
    if 0 == size_mode or 1 == size_mode:
        # Filter by size: only those with more than one treenode or with exactly one
        # The generator below consumes the first query's result set while
        # building the second query, then the final fetchall() reads the
        # size-filtered rows.
        # NOTE(review): if the first query matched nothing this builds an
        # invalid "IN ()" clause - confirm this edge cannot occur in
        # practice.
        cursor.execute('''
        SELECT skeleton_id
        FROM treenode
        WHERE skeleton_id IN (%s)
        GROUP BY skeleton_id
        HAVING count(*) %s 1
        ''' % (",".join(str(row[0]) for row in cursor.fetchall()), ">" if 0 == size_mode else "="))
    return HttpResponse(json.dumps(tuple(row[0] for row in cursor.fetchall())))
|
AdaEne/CATMAID
|
django/applications/catmaid/control/skeletonexport.py
|
Python
|
gpl-3.0
| 36,529
|
[
"NEURON"
] |
f85942b3e736ae0490c0bc3d4166f6151721116836f24f8d72568bf04326418f
|
#!/usr/bin/env python
# Midautumn
# Copyright 2011 Ron Huang
# See LICENSE for details.
from google.appengine.dist import use_library
use_library('django', '1.2')
import os
import logging
from datetime import datetime, timedelta
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.runtime import DeadlineExceededError
from google.appengine.ext import db
from django.utils import simplejson as json
from midautumn.models import MidautumnObject, FacebookEdge, FacebookComment
import midautumn.achievement as achievement
from midautumn.handlers import BaseHandler
class ObjectHandler(BaseHandler):
    """Create a single MidautumnObject (POST) or fetch one by ID (GET).

    Responses are JSON dicts with a 'result' field and, on success, an
    'objects' list of serialized entities.
    """
    def post(self):
        # Create an object titled `title` for the signed-in user, then
        # return every object published after `timestamp` (including the
        # one just created) plus any achievements unlocked by the post.
        title = self.request.get('title')
        timestamp = self.request.get('timestamp')
        user = self.current_user
        # Duplicate-title check.
        # NOTE(review): query-then-put is not transactional; two concurrent
        # posts with the same title could both be stored - confirm this is
        # acceptable.
        query = db.GqlQuery("SELECT * FROM MidautumnObject WHERE title = :1", title)
        mo = query.get()
        args = None
        if not user:
            args = {'result': 'not_authorized'}
        elif mo:
            args = {'result': 'duplicated', 'objects': [mo.to_dict(details=True, current_user=user),]}
        else:
            mo = MidautumnObject(title=title, owner=user)
            mo.put()
            # fetch all objects after the timestamp
            # should include the one just posted
            query = MidautumnObject.all()
            query.filter('pubtime >', datetime.utcfromtimestamp(float(timestamp)))
            query.order('-pubtime')
            objects = []
            for obj in query:
                objects.append(obj.to_dict(current_user=user))
            # check achievements
            achievements = []
            achievements.extend(achievement.check_post(mo))
            args = {'result': 'success',
                    'objects': objects,
                    'achievements': achievements}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(args))
    def get(self, key):
        # Look up a single object by its numeric datastore ID.
        mo = MidautumnObject.get_by_id(int(key))
        args = None
        if not mo:
            args = {'result': 'not_exist', 'key': key}
        else:
            args = {'result': 'success',
                    'objects': [mo.to_dict(current_user=self.current_user),]}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(args))
class DeleteObjectHandler(BaseHandler):
    """Delete a MidautumnObject; only its owner may do so."""
    def post(self, key):
        """Remove the object identified by the numeric datastore ID *key*."""
        viewer = self.current_user
        target = MidautumnObject.get_by_id(int(key))
        if not viewer:
            payload = {'result': 'not_authorized'}
        elif not target:
            payload = {'result': 'not_exist', 'key': key}
        elif target.owner.id != viewer.id:
            # Somebody else's object: refuse to touch it.
            payload = {'result': 'not_authorized'}
        else:
            target.delete()
            payload = {'result': 'success', 'key': key}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(payload))
class ObjectsHandler(BaseHandler):
    """List stored objects in pages of ten, newest first."""
    def get(self):
        """Return one page of objects, resuming from an optional cursor."""
        resume_from = self.request.get('cursor', None)
        # Newest-first listing; a datastore cursor continues a prior page.
        query = MidautumnObject.all()
        query.order('-pubtime')
        if resume_from:
            query.with_cursor(resume_from)
        viewer = self.current_user
        objects = [entity.to_dict(current_user=viewer)
                   for entity in query.fetch(10)]
        payload = {'result': 'success',
                   'objects': objects,
                   'cursor': query.cursor(),
                   'more': len(objects) >= 10,
                   }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(payload))
class EdgeHandler(BaseHandler):
    """Record a Facebook 'edge' (like/unlike) event against an object URL."""
    def post(self):
        # action is 'create' (liked) or 'remove' (unliked).
        action = self.request.get('action', None)
        url = self.request.get('url', None)
        user = self.current_user
        # Looked up before validation; the result is only used after the
        # parameter checks below. Presumably get_by_url tolerates url=None -
        # TODO confirm.
        mo = MidautumnObject.get_by_url(url)
        args = None
        if not user:
            args = {'result': 'not_authorized'}
        elif action not in ('create', 'remove'):
            args = {'result': 'unknown_action'}
        elif not url:
            args = {'result': 'missing_parameter'}
        elif not mo:
            args = {'result': 'invalid_parameter'}
        else:
            # Reuse the user's existing edge for this URL, if any, so that
            # repeated like/unlike toggles update a single entity.
            query = user.edge_set
            query.filter('url =', url)
            edge = query.get()
            if not edge:
                edge = FacebookEdge(owner=user, url=url, object=mo)
            if action == 'create':
                edge.connected = True
                edge.created = True
            else:
                edge.connected = False
                edge.removed = True
            edge.put()
            # Collect achievements unlocked by this (un)like.
            achievements = []
            achievements.extend(achievement.check_like(edge))
            args = {'result': 'success',
                    'achievements': achievements,
                    }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(args))
class CommentHandler(BaseHandler):
    """Track creation and removal of Facebook comments on an object."""
    def post(self):
        # action is 'create' or 'remove'; href identifies the object,
        # commentID the Facebook comment.
        action = self.request.get('action', None)
        href = self.request.get('href', None)
        commentID = self.request.get('commentID', None)
        user = self.current_user
        mo = MidautumnObject.get_by_url(href)
        args = None
        if not user:
            args = {'result': 'not_authorized'}
        elif action not in ('create', 'remove'):
            args = {'result': 'unknown_action'}
        elif not href or not commentID:
            args = {'result': 'missing_parameter'}
        elif not mo:
            args = {'result': 'invalid_parameter'}
        else:
            # Find the user's record of this specific Facebook comment.
            query = user.comment_set
            query.filter('href =', href)
            query.filter('comment_id =', commentID)
            comment = query.get()
            if (action == 'create' and comment) or (action == 'remove' and not comment):
                # Creating an already-tracked comment, or removing an
                # unknown one, is a client/server state mismatch.
                args = {'result': 'invalid_state'}
            elif action == 'create':
                comment = FacebookComment(owner=user, href=href, comment_id=commentID, object=mo)
                comment.put()
                achievements = []
                achievements.extend(achievement.check_comment(comment))
                args = {'result': 'success',
                        'achievements': achievements,
                        }
            else:
                comment.delete()
                achievements = []
                # NOTE(review): check_comment receives a comment entity in
                # the create branch but owner=user here - confirm
                # achievement.check_comment supports both call signatures.
                achievements.extend(achievement.check_comment(owner=user))
                args = {'result': 'success',
                        'achievements': achievements,
                        }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(args))
class VisitHandler(BaseHandler):
    """Record a user visit and maintain the continuous-visit streak."""
    def _handle_visit(self, user):
        # maintain continuous visit
        now = datetime.now()
        count = user.continuous_visit_count
        delta = now - user.continuous_visit_start
        # Streak window: day number `count` after the streak started.
        # timedelta(n) is n days.
        df = timedelta(count)
        dt = timedelta(count + 1)
        if delta >= df and delta < dt:
            # advance count
            user.continuous_visit_count = count + 1
            user.put()
        elif delta >= dt:
            # continuous visit reset
            user.continuous_visit_start = now
            user.continuous_visit_count = 1
            user.put()
        else:
            # still on the same day
            pass
    def post(self):
        # Report a visit; respond with any achievements it unlocked.
        user = self.current_user
        args = None
        if not user:
            args = {'result': 'not_authorized'}
        else:
            self._handle_visit(user)
            achievements = []
            achievements.extend(achievement.check_continuous_visit(user))
            achievements.extend(achievement.check_visit_date(user))
            args = {'result': 'success',
                    'achievements': achievements
                    }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(args))
def main():
    """Wire up the JSON API routes and start the WSGI application."""
    actions = [
        ('/api/object', ObjectHandler),
        ('/api/object/([0-9]+)$', ObjectHandler),
        ('/api/object/([0-9]+)/delete', DeleteObjectHandler),
        ('/api/objects', ObjectsHandler),
        ('/api/edge', EdgeHandler),
        ('/api/comment', CommentHandler),
        ('/api/visit', VisitHandler),
        ]
    application = webapp.WSGIApplication(actions, debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
ronhuang/adieu
|
backend/midautumn/api.py
|
Python
|
mit
| 8,598
|
[
"VisIt"
] |
8ca58a5556b154e7ef054786e6e370c862612e303e4069621ba586668ebb8b89
|
# proxy module
from __future__ import absolute_import
from mayavi.action.modules import *
|
enthought/etsproxy
|
enthought/mayavi/action/modules.py
|
Python
|
bsd-3-clause
| 90
|
[
"Mayavi"
] |
1e5f16296572a44649338b1101c8cf7ad94e6a02c774de5bd07021e086898504
|
#!/usr/bin/python
# Adjust SED gratings.
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
from __future__ import division
import sys
from StimControl.LightStim.Core import DefaultScreen
from StimControl.LightStim.LightData import dictattr
from StimControl.LightStim.FrameControl import FrameSweep
from StimControl.LightStim.ManSED import ManSED
# Manual Grating experiment parameters, all must be scalars
p = dictattr()
# mask, one of: None, 'gaussian', or 'circle'
p.mask = 'circle'
# step (deg) by which the mask size is grown/shrunk interactively
p.maskSizeStepDeg = 0.5
# initial grating phase
p.phase0 = 0
# grating mean luminance (0-1)
p.ml = 0.5
# grating contrast (0-1)
p.contrast = 1
# background brightness (0-1)
p.bgbrightness = 0.5
# antialiase the bar?
p.antialiase = True
# flash the grating?
p.flash = False
# duration of each on period (sec)
p.flashduration = 0.5
# duration of each off period (sec)
p.flashinterval = 0.3
# factor to change bar width and height by left/right/up/down key
p.sizemultiplier = 1.02
# factor to change temporal freq by on up/down
# NOTE(review): a multiplier of 0.0 zeroes the temporal frequency on the
# first adjustment - confirm this is intended (cf. sfreqmultiplier = 1.01).
p.tfreqmultiplier = 0.0
# factor to change spatial freq by on left/right
p.sfreqmultiplier = 1.01
# factor to change contrast by on +/-
p.contrastmultiplier = 1.005
# orientation step size to snap to when scrolling mouse wheel (deg)
p.snapDeg = 45.0
# initial stimulus geometry: aperture radius and mask diameter (deg)
p.radius = 2.0
p.maskDiameterDeg = 1.5
# initial spatial (cyc/deg) and temporal (cyc/sec) frequencies
p.sfreqCycDeg = 3.0
p.tfreqCycSec = 0.0
# initial orientation (deg)
p.ori = 0.0
if __name__ == '__main__':
    DefaultScreen(['control','left','right'])
    # Subject initials come from the command line when given; otherwise
    # prompt until a non-empty value is entered. (Previously a bare Enter
    # was accepted, yielding an empty subject name.)
    subject = None
    argv = list(sys.argv)
    if len(argv) >= 2:
        subject = argv[1]
    while not subject:
        sys.stdout.write('Please input lowercase initials of subject name: ')
        subject = raw_input().strip()
    # One stimulus per viewport; the right viewport shows the grating
    # rotated by 90 degrees.
    stimulus_control = ManSED(disp_info=True, subject=subject, params=p, viewport='control')
    stimulus_left = ManSED(disp_info=True, subject=subject, params=p, viewport='left')
    p.ori = 90.0
    stimulus_right = ManSED(disp_info=True, subject=subject, params=p, viewport='right')
    sweep = FrameSweep()
    sweep.add_stimulus(stimulus_control)
    sweep.add_stimulus(stimulus_left)
    sweep.add_stimulus(stimulus_right)
    sweep.go()
|
chrox/RealTimeElectrophy
|
StimControl/Experiments/demo/mansed.py
|
Python
|
bsd-2-clause
| 2,136
|
[
"Gaussian"
] |
8edb41aeb58e48ee5b3462d1e8c6ba77a5626da335db3d242be2800a066e1902
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
dct
mel
chroma
constant_q
Window functions
----------------
.. autosummary::
:toctree: generated/
window_bandwidth
get_window
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
"""
import warnings
import numpy as np
import scipy
import scipy.signal
import six
from . import cache
from . import util
from .util.exceptions import ParameterError
from .core.time_frequency import note_to_hz, hz_to_midi, hz_to_octs
from .core.time_frequency import fft_frequencies, mel_frequencies
__all__ = ['dct',
'mel',
'chroma',
'constant_q',
'constant_q_lengths',
'cq_to_chroma',
'window_bandwidth',
'get_window']
# Dictionary of window function bandwidths
# Maps window names (including their scipy aliases) to their equivalent
# noise bandwidth, measured in FFT bins; aliases of the same window share
# a value. The table is extended lazily by `window_bandwidth` for windows
# not listed here.
WINDOW_BANDWIDTHS = {'bart': 1.3334961334912805,
                     'barthann': 1.4560255965133932,
                     'bartlett': 1.3334961334912805,
                     'bkh': 2.0045975283585014,
                     'black': 1.7269681554262326,
                     'blackharr': 2.0045975283585014,
                     'blackman': 1.7269681554262326,
                     'blackmanharris': 2.0045975283585014,
                     'blk': 1.7269681554262326,
                     'bman': 1.7859588613860062,
                     'bmn': 1.7859588613860062,
                     'bohman': 1.7859588613860062,
                     'box': 1.0,
                     'boxcar': 1.0,
                     'brt': 1.3334961334912805,
                     'brthan': 1.4560255965133932,
                     'bth': 1.4560255965133932,
                     'cosine': 1.2337005350199792,
                     'flat': 2.7762255046484143,
                     'flattop': 2.7762255046484143,
                     'flt': 2.7762255046484143,
                     'halfcosine': 1.2337005350199792,
                     'ham': 1.3629455320350348,
                     'hamm': 1.3629455320350348,
                     'hamming': 1.3629455320350348,
                     'han': 1.50018310546875,
                     'hann': 1.50018310546875,
                     'hanning': 1.50018310546875,
                     'nut': 1.9763500280946082,
                     'nutl': 1.9763500280946082,
                     'nuttall': 1.9763500280946082,
                     'ones': 1.0,
                     'par': 1.9174603174603191,
                     'parz': 1.9174603174603191,
                     'parzen': 1.9174603174603191,
                     'rect': 1.0,
                     'rectangular': 1.0,
                     'tri': 1.3331706523555851,
                     'triang': 1.3331706523555851,
                     'triangle': 1.3331706523555851}
@cache(level=10)
def dct(n_filters, n_input):
    """Discrete cosine transform (DCT type-III) basis.

    .. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform

    Parameters
    ----------
    n_filters : int > 0 [scalar]
        number of output components (DCT filters)
    n_input : int > 0 [scalar]
        number of input components (frequency bins)

    Returns
    -------
    dct_basis: np.ndarray [shape=(n_filters, n_input)]
        DCT (type-III) basis vectors [1]_

    Notes
    -----
    This function caches at level 10.
    """
    # Sample points at the centers of the n_input bins, mapped to (0, pi).
    angles = np.arange(1, 2 * n_input, 2) * np.pi / (2.0 * n_input)
    basis = np.empty((n_filters, n_input))
    # Row 0 is the constant (DC) component; the remaining rows are
    # cosines of increasing order with the orthonormalizing scale.
    basis[0] = 1.0 / np.sqrt(n_input)
    scale = np.sqrt(2.0 / n_input)
    for order in range(1, n_filters):
        basis[order] = np.cos(order * angles) * scale
    return basis
@cache(level=10)
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False):
    """Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
    Parameters
    ----------
    sr : number > 0 [scalar]
        sampling rate of the incoming signal
    n_fft : int > 0 [scalar]
        number of FFT components
    n_mels : int > 0 [scalar]
        number of Mel bands to generate
    fmin : float >= 0 [scalar]
        lowest frequency (in Hz)
    fmax : float >= 0 [scalar]
        highest frequency (in Hz).
        If `None`, use `fmax = sr / 2.0`
    htk : bool [scalar]
        use HTK formula instead of Slaney
    Returns
    -------
    M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
        Mel transform matrix
    Notes
    -----
    This function caches at level 10.
    Examples
    --------
    >>> melfb = librosa.filters.mel(22050, 2048)
    >>> melfb
    array([[ 0.   ,  0.016, ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           ...,
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ]])
    Clip the maximum frequency to 8KHz
    >>> librosa.filters.mel(22050, 2048, fmax=8000)
    array([[ 0.  ,  0.02, ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           ...,
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ]])
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(melfb, x_axis='linear')
    >>> plt.ylabel('Mel filter')
    >>> plt.title('Mel filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    if fmax is None:
        fmax = float(sr) / 2
    # Initialize the weights
    n_mels = int(n_mels)
    weights = np.zeros((n_mels, int(1 + n_fft // 2)))
    # Center freqs of each FFT bin
    fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
    # 'Center freqs' of mel bands - uniformly spaced between limits
    mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
    fdiff = np.diff(mel_f)
    # ramps[i, j]: distance in Hz from mel band edge i to FFT bin center j.
    ramps = np.subtract.outer(mel_f, fftfreqs)
    # Filter i is a triangle rising from mel_f[i] to a peak at mel_f[i+1]
    # and falling back to zero at mel_f[i+2].
    for i in range(n_mels):
        # lower and upper slopes for all bins
        lower = -ramps[i] / fdiff[i]
        upper = ramps[i+2] / fdiff[i+1]
        # .. then intersect them with each other and zero
        weights[i] = np.maximum(0, np.minimum(lower, upper))
    # Slaney-style mel is scaled to be approx constant energy per channel
    enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
    weights *= enorm[:, np.newaxis]
    # Only check weights if f_mel[0] is positive
    if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
        # This means we have an empty channel somewhere
        warnings.warn('Empty filters detected in mel frequency basis. '
                      'Some channels will produce empty responses. '
                      'Try increasing your sampling rate (and fmax) or '
                      'reducing n_mels.')
    return weights
@cache(level=10)
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
           octwidth=2, norm=2, base_c=True):
    """Create a Filterbank matrix to convert STFT to chroma
    Parameters
    ----------
    sr : number > 0 [scalar]
        audio sampling rate
    n_fft : int > 0 [scalar]
        number of FFT bins
    n_chroma : int > 0 [scalar]
        number of chroma bins
    A440 : float > 0 [scalar]
        Reference frequency for A440
    ctroct : float > 0 [scalar]
    octwidth : float > 0 or None [scalar]
        `ctroct` and `octwidth` specify a dominance window -
        a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
        and with a gaussian half-width of `octwidth`.
        Set `octwidth` to `None` to use a flat weighting.
    norm : float > 0 or np.inf
        Normalization factor for each filter
    base_c : bool
        If True, the filter bank will start at 'C'.
        If False, the filter bank will start at 'A'.
    Returns
    -------
    wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
        Chroma filter matrix
    See Also
    --------
    util.normalize
    feature.chroma_stft
    Notes
    -----
    This function caches at level 10.
    Examples
    --------
    Build a simple chroma filter bank
    >>> chromafb = librosa.filters.chroma(22050, 4096)
    array([[ 1.689e-05,  3.024e-04, ...,  4.639e-17,  5.327e-17],
           [ 1.716e-05,  2.652e-04, ...,  2.674e-25,  3.176e-25],
           ...,
           [ 1.578e-05,  3.619e-04, ...,  8.577e-06,  9.205e-06],
           [ 1.643e-05,  3.355e-04, ...,  1.474e-10,  1.636e-10]])
    Use quarter-tones instead of semitones
    >>> librosa.filters.chroma(22050, 4096, n_chroma=24)
    array([[ 1.194e-05,  2.138e-04, ...,  6.297e-64,  1.115e-63],
           [ 1.206e-05,  2.009e-04, ...,  1.546e-79,  2.929e-79],
           ...,
           [ 1.162e-05,  2.372e-04, ...,  6.417e-38,  9.923e-38],
           [ 1.180e-05,  2.260e-04, ...,  4.697e-50,  7.772e-50]])
    Equally weight all octaves
    >>> librosa.filters.chroma(22050, 4096, octwidth=None)
    array([[ 3.036e-01,  2.604e-01, ...,  2.445e-16,  2.809e-16],
           [ 3.084e-01,  2.283e-01, ...,  1.409e-24,  1.675e-24],
           ...,
           [ 2.836e-01,  3.116e-01, ...,  4.520e-05,  4.854e-05],
           [ 2.953e-01,  2.888e-01, ...,  7.768e-10,  8.629e-10]])
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(chromafb, x_axis='linear')
    >>> plt.ylabel('Chroma filter')
    >>> plt.title('Chroma filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    """
    wts = np.zeros((n_chroma, n_fft))
    # Get the FFT bins, not counting the DC component
    frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
    # Fractional chroma-bin number of every FFT bin.
    frqbins = n_chroma * hz_to_octs(frequencies, A440)
    # make up a value for the 0 Hz bin = 1.5 octaves below bin 1
    # (so chroma is 50% rotated from bin 1, and bin width is broad)
    frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
    binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
                                              1.0), [1]))
    # D[c, f]: signed distance (in chroma bins) from FFT bin f to chroma c.
    D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
    n_chroma2 = np.round(float(n_chroma) / 2)
    # Project into range -n_chroma/2 .. n_chroma/2
    # add on fixed offset of 10*n_chroma to ensure all values passed to
    # rem are positive
    D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
    # Gaussian bumps - 2*D to make them narrower
    wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
    # normalize each column
    wts = util.normalize(wts, norm=norm, axis=0)
    # Maybe apply scaling for fft bins
    if octwidth is not None:
        wts *= np.tile(
            np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
            (n_chroma, 1))
    if base_c:
        # NOTE(review): the fixed -3 roll shifts the A-based layout to a C
        # base for the standard 12-bin case; verify it is still the
        # intended shift when n_chroma != 12.
        wts = np.roll(wts, -3, axis=0)
    # remove aliasing columns, copy to ensure row-contiguity
    return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)])
def __float_window(window_spec):
    '''Decorator function for windows with fractional input.
    This function guarantees that for fractional `x`, the following hold:
    1. `__float_window(window_function)(x)` has length `np.ceil(x)`
    2. all values from `np.floor(x)` are set to 0.
    For integer-valued `x`, there should be no change in behavior.
    '''
    def _wrap(n, *args, **kwargs):
        '''The wrapped window'''
        floor_n = int(np.floor(n))
        ceil_n = int(np.ceil(n))
        window = get_window(window_spec, floor_n)
        # Zero-pad on the right so the total length reaches ceil(n).
        shortfall = ceil_n - len(window)
        if shortfall > 0:
            window = np.pad(window, [(0, shortfall)], mode='constant')
        # Anything past floor(n) is forced to zero.
        window[floor_n:] = 0.0
        return window
    return _wrap
@cache(level=10)
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
               window='hann', filter_scale=1, pad_fft=True, norm=1,
               **kwargs):
    r'''Construct a constant-Q basis.
    This uses the filter bank described by [1]_.
    .. [1] McVicar, Matthew.
            "A machine learning approach to automatic chord extraction."
            Dissertation, University of Bristol. 2013.
    Parameters
    ----------
    sr : number > 0 [scalar]
        Audio sampling rate
    fmin : float > 0 [scalar]
        Minimum frequency bin. Defaults to `C1 ~= 32.70`
    n_bins : int > 0 [scalar]
        Number of frequencies.  Defaults to 7 octaves (84 bins).
    bins_per_octave : int > 0 [scalar]
        Number of bins per octave
    tuning : float in `[-0.5, +0.5)` [scalar]
        Tuning deviation from A440 in fractions of a bin
    window : string, tuple, number, or function
        Windowing function to apply to filters.
    filter_scale : float > 0 [scalar]
        Scale of filter windows.
        Small values (<1) use shorter windows for higher temporal resolution.
    pad_fft : boolean
        Center-pad all filters up to the nearest integral power of 2.
        By default, padding is done with zeros, but this can be overridden
        by setting the `mode=` field in *kwargs*.
    norm : {inf, -inf, 0, float > 0}
        Type of norm to use for basis function normalization.
        See librosa.util.normalize
    kwargs : additional keyword arguments
        Arguments to `np.pad()` when `pad==True`.
    Returns
    -------
    filters : np.ndarray, `len(filters) == n_bins`
        `filters[i]` is `i`\ th time-domain CQT basis filter
    lengths : np.ndarray, `len(lengths) == n_bins`
        The (fractional) length of each filter
    Notes
    -----
    This function caches at level 10.
    See Also
    --------
    constant_q_lengths
    librosa.core.cqt
    librosa.util.normalize
    Examples
    --------
    Use a shorter window for each filter
    >>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
    Plot one octave of filters in time and frequency
    >>> import matplotlib.pyplot as plt
    >>> basis, lengths = librosa.filters.constant_q(22050)
    >>> plt.figure(figsize=(10, 6))
    >>> plt.subplot(2, 1, 1)
    >>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
    >>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
    ...     f_scale = librosa.util.normalize(f) / 2
    ...     plt.plot(i + f_scale.real)
    ...     plt.plot(i + f_scale.imag, linestyle=':')
    >>> plt.axis('tight')
    >>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
    >>> plt.ylabel('CQ filters')
    >>> plt.title('CQ filters (one octave, time domain)')
    >>> plt.xlabel('Time (samples at 22050 Hz)')
    >>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
    >>> plt.subplot(2, 1, 2)
    >>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
    >>> # Keep only the positive frequencies
    >>> F = F[:, :(1 + F.shape[1] // 2)]
    >>> librosa.display.specshow(F, x_axis='linear')
    >>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
    >>> plt.ylabel('CQ filters')
    >>> plt.title('CQ filter magnitudes (frequency domain)')
    >>> plt.tight_layout()
    '''
    if fmin is None:
        fmin = note_to_hz('C1')
    # Pass-through parameters to get the filter lengths
    lengths = constant_q_lengths(sr, fmin,
                                 n_bins=n_bins,
                                 bins_per_octave=bins_per_octave,
                                 tuning=tuning,
                                 window=window,
                                 filter_scale=filter_scale)
    # Apply tuning correction
    correction = 2.0**(float(tuning) / bins_per_octave)
    fmin = correction * fmin
    # Q should be capitalized here, so we suppress the name warning
    # pylint: disable=invalid-name
    Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
    # Convert lengths back to frequencies
    freqs = Q * sr / lengths
    # Build the filters
    filters = []
    for ilen, freq in zip(lengths, freqs):
        # Build the filter: note, length will be ceil(ilen)
        # Complex sinusoid at the filter's center frequency.
        sig = np.exp(np.arange(ilen, dtype=float) * 1j * 2 * np.pi * freq / sr)
        # Apply the windowing function
        sig = sig * __float_window(window)(ilen)
        # Normalize
        sig = util.normalize(sig, norm=norm)
        filters.append(sig)
    # Pad and stack
    max_len = max(lengths)
    if pad_fft:
        # Round the common length up to the next power of two for FFT use.
        max_len = int(2.0**(np.ceil(np.log2(max_len))))
    else:
        max_len = int(np.ceil(max_len))
    filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
                          for filt in filters])
    return filters, np.asarray(lengths)
@cache(level=10)
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
                       tuning=0.0, window='hann', filter_scale=1):
    r'''Return length of each filter in a constant-Q basis.
    Parameters
    ----------
    sr : number > 0 [scalar]
        Audio sampling rate
    fmin : float > 0 [scalar]
        Minimum frequency bin.
    n_bins : int > 0 [scalar]
        Number of frequencies.  Defaults to 7 octaves (84 bins).
    bins_per_octave : int > 0 [scalar]
        Number of bins per octave
    tuning : float in `[-0.5, +0.5)` [scalar]
        Tuning deviation from A440 in fractions of a bin
    window : str or callable
        Window function to use on filters
    filter_scale : float > 0 [scalar]
        Resolution of filter windows. Larger values use longer windows.
    Returns
    -------
    lengths : np.ndarray
        The length of each filter.
    Notes
    -----
    This function caches at level 10.
    See Also
    --------
    constant_q
    librosa.core.cqt
    '''
    if fmin <= 0:
        raise ParameterError('fmin must be positive')
    if bins_per_octave <= 0:
        raise ParameterError('bins_per_octave must be positive')
    if filter_scale <= 0:
        raise ParameterError('filter_scale must be positive')
    # NOTE(review): isinstance(n_bins, int) rejects numpy integer types -
    # confirm callers never pass e.g. np.int64.
    if n_bins <= 0 or not isinstance(n_bins, int):
        raise ParameterError('n_bins must be a positive integer')
    correction = 2.0**(float(tuning) / bins_per_octave)
    fmin = correction * fmin
    # Q should be capitalized here, so we suppress the name warning
    # pylint: disable=invalid-name
    Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
    # Compute the frequencies
    freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
    # Reject configurations whose highest filter extends past Nyquist.
    if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
        raise ParameterError('Filter pass-band lies beyond Nyquist')
    # Convert frequencies to filter lengths
    lengths = Q * sr / freq
    return lengths
@cache(level=10)
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
                 fmin=None, window=None, base_c=True):
    '''Convert a Constant-Q basis to Chroma.
    Parameters
    ----------
    n_input : int > 0 [scalar]
        Number of input components (CQT bins)
    bins_per_octave : int > 0 [scalar]
        How many bins per octave in the CQT
    n_chroma : int > 0 [scalar]
        Number of output bins (per octave) in the chroma
    fmin : None or float > 0
        Center frequency of the first constant-Q channel.
        Default: 'C1' ~= 32.7 Hz
    window : None or np.ndarray
        If provided, the cq_to_chroma filter bank will be
        convolved with `window`.
    base_c : bool
        If True, the first chroma bin will start at 'C'
        If False, the first chroma bin will start at 'A'
    Returns
    -------
    cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
        Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
    Raises
    ------
    ParameterError
        If `n_input` is not an integer multiple of `n_chroma`
    Notes
    -----
    This function caches at level 10.
    Examples
    --------
    Get a CQT, and wrap bins to chroma
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> CQT = librosa.cqt(y, sr=sr)
    >>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
    >>> chromagram = chroma_map.dot(CQT)
    >>> # Max-normalize each time step
    >>> chromagram = librosa.util.normalize(chromagram, axis=0)
    >>> import matplotlib.pyplot as plt
    >>> plt.subplot(3, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(CQT,
    ...                                                  ref=np.max),
    ...                          y_axis='cqt_note')
    >>> plt.title('CQT Power')
    >>> plt.colorbar()
    >>> plt.subplot(3, 1, 2)
    >>> librosa.display.specshow(chromagram, y_axis='chroma')
    >>> plt.title('Chroma (wrapped CQT)')
    >>> plt.colorbar()
    >>> plt.subplot(3, 1, 3)
    >>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
    >>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
    >>> plt.title('librosa.feature.chroma_stft')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    '''
    # How many fractional bins are we merging?
    n_merge = float(bins_per_octave) / n_chroma
    if fmin is None:
        fmin = note_to_hz('C1')
    if np.mod(n_merge, 1) != 0:
        raise ParameterError('Incompatible CQ merge: '
                             'input bins must be an '
                             'integer multiple of output bins.')
    # Tile the identity to merge fractional bins
    cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
    # Roll it left to center on the target bin
    cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
    # How many octaves are we repeating?
    # Use the builtin float here: `np.float` was a deprecated alias of it
    # and is removed in NumPy >= 1.24.
    n_octaves = np.ceil(float(n_input) / bins_per_octave)
    # Repeat and trim
    cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
    # What's the note number of the first bin in the CQT?
    # midi uses 12 bins per octave here
    midi_0 = np.mod(hz_to_midi(fmin), 12)
    if base_c:
        # rotate to C
        roll = midi_0
    else:
        # rotate to A
        roll = midi_0 - 9
    # Adjust the roll in terms of how many chroma we want out
    # We need to be careful with rounding here
    roll = int(np.round(roll * (n_chroma / 12.)))
    # Apply the roll
    cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(float)
    if window is not None:
        # Smooth each chroma channel across neighboring CQT bins.
        cq_to_ch = scipy.signal.convolve(cq_to_ch,
                                         np.atleast_2d(window),
                                         mode='same')
    return cq_to_ch
@cache(level=10)
def window_bandwidth(window, n=1000):
    '''Get the equivalent noise bandwidth of a window function.
    Parameters
    ----------
    window : callable or string
        A window function, or the name of a window function.
        Examples:
        - scipy.signal.hann
        - 'boxcar'
    n : int > 0
        The number of coefficients to use in estimating the
        window bandwidth
    Returns
    -------
    bandwidth : float
        The equivalent noise bandwidth (in FFT bins) of the
        given window function
    Notes
    -----
    This function caches at level 10.
    See Also
    --------
    get_window
    '''
    # Callables are keyed by their function name; strings by themselves.
    if hasattr(window, '__name__'):
        key = window.__name__
    else:
        key = window
    if key not in WINDOW_BANDWIDTHS:
        win = get_window(window, n)
        # Equivalent noise bandwidth estimate: n * sum(w^2) / sum(|w|)^2.
        # Memoized into the module-level table so later calls skip the
        # window construction.
        WINDOW_BANDWIDTHS[key] = n * np.sum(win**2) / np.sum(np.abs(win))**2
    return WINDOW_BANDWIDTHS[key]
@cache(level=10)
def get_window(window, Nx, fftbins=True):
    '''Compute a window function.
    This is a wrapper for `scipy.signal.get_window` that additionally
    supports callable or pre-computed windows.
    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        The window specification:
        - If string, it's the name of the window function (e.g., `'hann'`)
        - If tuple, it's the name of the window function and any parameters
          (e.g., `('kaiser', 4.0)`)
        - If numeric, it is treated as the beta parameter of the `'kaiser'`
          window, as in `scipy.signal.get_window`.
        - If callable, it's a function that accepts one integer argument
          (the window length)
        - If list-like, it's a pre-computed window of the correct length `Nx`
    Nx : int > 0
        The length of the window
    fftbins : bool, optional
        If True (default), create a periodic window for use with FFT
        If False, create a symmetric window for filter design applications.
    Returns
    -------
    get_window : np.ndarray
        A window of length `Nx` and type `window`
    See Also
    --------
    scipy.signal.get_window
    Notes
    -----
    This function caches at level 10.
    Raises
    ------
    ParameterError
        If `window` is supplied as a vector of length != `n_fft`,
        or is otherwise mis-specified.
    '''
    if six.callable(window):
        # A callable builds its own window of the requested length.
        return window(Nx)
    elif (isinstance(window, (six.string_types, tuple)) or
          np.isscalar(window)):
        # TODO: if we add custom window functions in librosa, call them here
        return scipy.signal.get_window(window, Nx, fftbins=fftbins)
    elif isinstance(window, (np.ndarray, list)):
        # Pre-computed window: accepted only if the length matches exactly.
        if len(window) == Nx:
            return np.asarray(window)
        raise ParameterError('Window size mismatch: '
                             '{:d} != {:d}'.format(len(window), Nx))
    else:
        raise ParameterError('Invalid window specification: {}'.format(window))
|
ruohoruotsi/librosa
|
librosa/filters.py
|
Python
|
isc
| 25,845
|
[
"Gaussian"
] |
776c50892649b06fadda7093df45e1e8e0a4b87cd31f74a761bebb4c1bc5d79d
|
# GALLERY_GET is a tool for downloading images from a gallery page.
#
# ABOUT THIS TOOL
# Many sites link each thumbnail to another page for displaying the image,
# which makes batch-downloading impossible (even with browser plugins).
# This tool will crawl the linked pages for the actual image and download them for you.
# Also, this tool can request multiple images at one time
#
# ABOUT THE PLUGINS FOLDER
# the gallery_plugins folder contains settings for different gallery pages
# feel free to add your own plugins for sites not already supported!
#
# Rego Sen
# Aug 22, 2013
#
import os,time,sys,traceback
import re
# Python 3 imports that throw in Python 2
try:
import queue
from html import unescape
except ImportError:
# This is Python 2
import Queue as queue
import HTMLParser
html_parser = HTMLParser.HTMLParser()
unescape = html_parser.unescape
import threading
from gallery_utils import *
from gallery_plugins import *
import multiprocessing
import calendar
# Shared state for the worker-thread download machinery.
QUEUE = queue.Queue()  # pending JobInfo items consumed by ImgThread workers
STANDBY = False  # True while more jobs may still be queued (keeps workers polling)
THREADS = []  # currently running ImgThread instances
MAX_ATTEMPTS = 10  # maximum download retries per image
ERRORS_ENCOUNTERED = False  # sticky failure flag, reported/reset by flush_jobs()
# Characters considered "texty" (control subset + printable range); used for sniffing.
TEXTCHARS = ''.join(map(chr, [7,8,9,10,12,13,27] + list(range(0x20, 0x100))))
# Remember the last destination folder across runs, next to this script.
DESTPATH_FILE = os.path.join(os.path.dirname(str(__file__)), "last_gallery_dest.txt")
DEST_ROOT = unicode_safe(os.getcwd())
if os.path.exists(DESTPATH_FILE):
    DEST_ROOT = unicode_safe(open(DESTPATH_FILE,"r").read().strip())
EXCEPTION_NOTICE = """An exception occurred! We can help if you follow these steps:\n
1. Visit https://github.com/regosen/gallery_get/issues
2. Click the "New Issue" button
3. Copy the text between the lines and paste it into the issue.
(If you don't want to share the last line, the rest can still help.)"""
PARAMS = []
def safe_str(name):
    """Sanitize a title into a filesystem-safe ASCII name.

    Non-string inputs are simply converted with str().
    """
    if not is_str(name):
        return str(name)
    cleaned = name.replace(":", ";")  # to preserve emoticons
    cleaned = "".join(ch for ch in cleaned if ord(ch) < 128)  # strip non-ASCII
    cleaned = unescape(cleaned)  # decode HTML entities (&amp; etc.)
    # Remove characters that are illegal in filenames, then trim edges.
    return re.sub(r"[\/\\\*\?\"\<\>\|]", "", cleaned).strip().rstrip(".")
def safe_unpack(obj, default):
    """Normalize a match result into a (link, subtitle) pair.

    `obj` may be a bare string (link only), a 2-sequence (link, subtitle),
    or falsy (no match).  The subtitle is always passed through safe_str.
    """
    if is_str(obj):
        return (obj, safe_str(default))
    if obj:
        return (obj[0], safe_str(obj[1]))
    return ("", "")
def safe_url(parent, link):
    """Resolve `link` (absolute, protocol-relative, or page-relative) against
    the `parent` page URL, and decode `&amp;` entities.

    Returns "" when `link` is not string-like (AttributeError path).
    """
    try:
        if not link.lower().startswith("http"):
            uri=urlparse(parent)
            root = '{uri.scheme}://{uri.netloc}/'.format(uri=uri)
            if link.startswith("//"):
                # protocol-relative: borrow the parent's scheme
                link = "%s:%s" % (uri.scheme, link)
            elif link.startswith("/") or root.strip('/').lower() == parent.strip('/').lower():
                # site-absolute path, or the parent page is the site root itself
                link = root + link
            else:
                # page-relative: resolve against the parent's directory
                link = os.path.dirname(parent) + "/" + link
        return link.replace("&amp;","&")
    except AttributeError:
        # link was not a string (e.g. None) -- treat as no link
        return ""
def find_plugin(url):
    """Return the first plugin whose identifier pattern matches `url`.

    Falls back to the generic plugin when no site-specific plugin matches.
    """
    for plugin_name in PLUGINS.keys():
        if plugin_name == "plugin_generic":
            continue  # generic is the fallback, never matched by identifier
        candidate = PLUGINS[plugin_name]
        if run_match(candidate.identifier, url):
            return candidate
    return PLUGINS["plugin_generic"]
def run_match(match, source, singleItem=False):
    """Apply a plugin matcher to `source`.

    `match` may be a regex string (matched case-insensitively against
    `source`), a callable taking the source text, or a literal fallback
    value.  Returns a de-duplicated list of matches, or a single item
    when `singleItem` is True.
    """
    result = []
    if not is_str(source):
        # source is already a non-string result (e.g. pre-parsed); pass through
        result = [source]
    elif match:
        if is_str(match):
            rematch = re.compile(match, re.I)
            # support for grouped matches
            for curmatch in rematch.finditer(source):
                group = curmatch.groupdict()
                if group:
                    # named groups: plugins use (?P<link>...)(?P<basename>...)
                    result.append((group['link'],group['basename']))
                else:
                    group = curmatch.groups()
                    if group:
                        # positional groups: keep tuple only when >1 group
                        result.append(group if len(group) > 1 else group[0])
                    else:
                        # no groups at all: the pattern itself is the value
                        result.append(match)
        else:
            # callable matcher: delegate entirely
            result = match(source)
    if singleItem:
        result = result if is_str(result) else result[0] if result else ""
    elif is_str(result):
        result = [result]
    elif is_iterable(result):
        # remove duplicates without affecting order (like set does)
        visited = set()
        visited_add = visited.add
        result = [x for x in result if not (x in visited or visited_add(x))]
    return result if result else []
def download_image(url, fileNameFull):
    """Queue a single direct-image download.

    The file extension is taken from the URL; returns the destination
    folder on success (including when the file already exists), or False
    when the URL cannot be processed.
    """
    global ERRORS_ENCOUNTERED
    try:
        urlBase, fileExtension = os.path.splitext(url.split("?")[0])
        fileName = os.path.abspath(fileNameFull)[:255] + fileExtension # full path must be 260 characters or lower
        folder = os.path.dirname(fileName)
        if not os.path.exists(folder):
            os.makedirs(folder)
        elif os.path.exists(fileName):
            print("Skipping existing file: " + url)
            return folder
        # hand off to the worker threads rather than downloading inline
        add_job(path=url, dest=folder, subtitle=fileName)
        return folder
    except:
        # deliberately broad: any failure here is reported, not raised
        print("ERROR: Couldn't open URL: " + url)
        ERRORS_ENCOUNTERED = True
        return False
class JobInfo(object):
    """A single unit of work for the download threads.

    A job either points directly at an image (`path`) or at a page that
    must be crawled for the image (`redirect`).
    """
    def __init__(self, plugin=None, subtitle="", path="", redirect="", dest="", index=0):
        self.plugin = plugin          # plugin module that matched this gallery
        self.index = index            # 1-based position within the gallery (0 = single)
        self.redirect = redirect      # page URL to crawl for the real image, if any
        self.path = path              # direct image URL
        self.subtitle = subtitle      # human-readable name used for the file
        self.attempts = 0             # download retries so far (capped by MAX_ATTEMPTS)
        self.dest = dest              # destination folder
        self.data = None              # raw image bytes, once read
        self.override = None          # explicit filename override (redirect-as-image case)
    def destination_filename(self):
        """Build the full output path, deriving the basename and extension."""
        indexstr = "%03d" % self.index # 001, 002, etc.
        basename = self.subtitle
        if self.override:
            basename = self.override
        elif self.plugin and self.plugin.use_filename:
            # keep the server-side filename (minus any query string)
            basename = unquote(os.path.basename(self.path).split("?")[0])
        elif not basename or basename == FALLBACK_TITLE:
            basename = indexstr
        elif self.index > 0:
            # disambiguate multiple images sharing one subtitle
            (basename,ext) = os.path.splitext(basename)
            basename = "%s_%s%s" % (basename, indexstr, ext)
        # copy extension (falling back on jpg)
        if not re.match(r".+\.[a-zA-Z0-9]+\Z", basename):
            ext = ".jpg"
            tokens = self.path.split("?")[0].split("/")[-1].split(".")
            if len(tokens) > 1:
                ext = "." + tokens[-1]
            basename += ext
        return os.path.join(self.dest, basename)
    def write_to_file(self, file_info, file_name):
        """Read the response body (unless pre-fetched) and write it to disk.

        Returns False when reading the response fails.
        """
        success = True
        try:
            if not self.data:
                self.data = file_info.read()
        except Exception as error:
            print(error)
            success = False
        file_info.close()
        if success:
            output = open(file_name,'wb')
            output.write(self.data)
            output.close()
        return success
def start_jobs():
    """Spin up one download worker per CPU core (no-op if already running)."""
    global STANDBY, THREADS
    if THREADS:
        return
    STANDBY = True  # keep workers polling even while the queue is momentarily empty
    for _ in range(multiprocessing.cpu_count()):
        worker = ImgThread()
        worker.start()
        THREADS.append(worker)
def flush_jobs():
    """Wait for every worker to finish and report overall success.

    Clears STANDBY so workers exit once the queue drains, joins them,
    and resets the sticky error flag for the next batch.
    """
    global STANDBY, THREADS, ERRORS_ENCOUNTERED
    STANDBY = False
    while THREADS:
        THREADS.pop().join()
    success = not ERRORS_ENCOUNTERED
    ERRORS_ENCOUNTERED = False
    if success:
        print("Done!")
    else:
        print("Errors were encountered! Please check messages above.")
    return success
def add_job(plugin=None, subtitle="", path="", redirect="", dest="", index=0):
    """Enqueue a download/redirect job, making sure workers are running."""
    global QUEUE
    start_jobs()
    job = JobInfo(plugin=plugin, subtitle=subtitle, path=path,
                  redirect=redirect, dest=dest, index=index)
    QUEUE.put(job)
class ImgThread(threading.Thread):
    """Threaded Url Grab"""
    def can_skip(self, file_name, file_info):
        """Return True when the destination already holds this download.

        A missing content-length header also skips (assume same file).
        """
        if os.path.exists(file_name):
            # file already exists. Skip if same size
            srcsize = 0
            try:
                srcsize = int(file_info.headers.get("content-length"))
            except:
                return True
            destsize = os.stat(file_name).st_size
            if srcsize == destsize:
                return True
        return False
    def copy_image(self, info):
        """Download one image described by `info`; return True on success.

        Success also requires the written file to exceed 4096 bytes
        (guards against error pages saved as images).
        """
        info.attempts += 1
        file_name = info.destination_filename()
        try:
            file_info = urlopen_safe(info.path)
        except Exception as error:
            print(error)
            return False
        try:
            modtimestr = file_info.headers['last-modified']
            modtime = time.strptime(modtimestr, '%a, %d %b %Y %H:%M:%S %Z')
        except:
            modtime = None
        if self.can_skip(file_name, file_info):
            print("Skipping existing file: " + info.path)
            return True
        if info.attempts == 1:
            # only announce on the first attempt to avoid retry spam
            print("%s -> %s" % (info.path, file_name))
        if not info.write_to_file(file_info, file_name):
            return False
        if modtime is not None:
            # preserve the server-side modification time on the local file
            lastmod = calendar.timegm(modtime)
            os.utime(file_name, (lastmod, lastmod))
        return os.path.getsize(file_name) > 4096
    def is_binary(self, urlresponse):
        """Heuristic: anything without a text/html content type is an image."""
        try:
            return not 'text/html' in urlresponse.headers['Content-Type']
        except:
            return True
    def process_redirect_page(self, info, response):
        """Crawl a redirect page for real image links, queueing new jobs."""
        global ERRORS_ENCOUNTERED
        if self.is_binary(response):
            # looks like the redirect page is an actual image
            info.path = info.redirect
            info.data = response.read()
            info.override = info.subtitle
        elif info.plugin:
            try:
                source = response.read()
            except:
                ERRORS_ENCOUNTERED = True
                print("Error encountered reading redirect page: " + info.redirect)
                return
            # the generic plugin may defer to a site-specific one for the redirect
            plugin = find_plugin(info.redirect) if (info.plugin.identifier == "generic") else info.plugin
            jpegs = run_match(plugin.direct,unicode_safe(source))
            if not jpegs:
                ERRORS_ENCOUNTERED = True
                print("No links found at redirect page: " + info.redirect)
            elif len(jpegs) == 1 and not info.path:
                (info.path,info.subtitle) = safe_unpack(jpegs[0],info.subtitle)
            else:
                # redirect has multiple links, put them in their own subfolders
                for idx, path in enumerate(jpegs):
                    (path,subtitle) = safe_unpack(path,info.subtitle)
                    path = safe_url(info.redirect, path)
                    add_job(plugin=plugin, path=path, dest=os.path.join(info.dest,subtitle), index=idx+1)
    def run_internal(self):
        """Worker loop: poll the queue while STANDBY or work remains."""
        global QUEUE, STANDBY, MAX_ATTEMPTS, ERRORS_ENCOUNTERED
        while STANDBY or not QUEUE.empty():
            try:
                info = QUEUE.get(False)
            except:
                # queue empty right now; back off briefly
                time.sleep(0.5)
                continue
            if info.redirect:
                try:
                    response = urlopen_safe(info.redirect)
                except:
                    print("WARNING: Failed to open redirect " + info.redirect)
                    ERRORS_ENCOUNTERED = True
                    continue
                self.process_redirect_page(info, response)
            if info.path:
                info.path = safe_url(info.redirect, info.path)
                # retry the download until success or MAX_ATTEMPTS
                while not self.copy_image(info):
                    if info.attempts >= MAX_ATTEMPTS:
                        print("ERROR: Failed to copy %s" % info.path)
                        ERRORS_ENCOUNTERED = True
                        break
            QUEUE.task_done()
    def run(self):
        """Thread entry: wrap run_internal so any crash prints the bug-report notice."""
        global ERRORS_ENCOUNTERED
        try:
            self.run_internal()
        except:
            print('\n' + '-'*60)
            traceback.print_exc(file=sys.stdout)
            print("Using params: %s" % sys.argv)
            print('-'*60 + '\n')
            print(EXCEPTION_NOTICE)
            ERRORS_ENCOUNTERED = True
            # hard-exit: other threads may be blocked and we cannot recover
            os._exit(1)
class GalleryGet(object):
    """Orchestrates scraping one gallery URL: picks a plugin, fetches the
    page, derives a destination folder, and queues download jobs."""
    def __init__(self, url, folder=DEST_ROOT, useTitleAsFolder=True, allowGenericPlugin=True):
        self.url = url
        self.folder = folder.strip()
        self.title_as_folder = useTitleAsFolder
        self.allow_generic = allowGenericPlugin # DON'T USE GENERIC PLUGIN FROM REDDIT_GET
        self.plugin = find_plugin(self.url)
    def get_root_and_subtitle(self, page):
        """Derive (destination root, subtitle) from the page's title match."""
        title = run_match(self.plugin.title, page, True)
        (title, subtitle) = safe_unpack(title, "")
        title = safe_str(title)
        if not title:
            title = FALLBACK_TITLE
        root = ""
        if self.folder:
            if self.title_as_folder:
                root = self.folder
                subtitle = title
            else:
                root = os.path.join(self.folder, title)
        else:
            root = title
        return (root, subtitle)
    def queue_jobs(self, page, root, subtitle):
        """Match links on `page` and queue one job per image.

        Prefers redirect (thumbnail->page) matches; falls back to direct
        image matches.  Returns the destination root, or falsy on no links.
        """
        global ERRORS_ENCOUNTERED
        links = []
        using_redirect = False
        if self.plugin.redirect:
            links = run_match(self.plugin.redirect, page)
            if links:
                using_redirect = True
                for idx, link in enumerate(links):
                    (link,subtitle) = safe_unpack(link, subtitle)
                    link = safe_url(self.url, link)
                    safe_makedirs(root)
                    add_job(plugin=self.plugin, redirect=link, dest=root, subtitle=subtitle, index=idx+1)
        if not using_redirect:
            links = run_match(self.plugin.direct, page)
            if len(links) == 1:
                # don't create folder for only one file
                (root, filename) = os.path.split(root)
                safe_makedirs(root)
                add_job(plugin=self.plugin, path=links[0], dest=root, subtitle=filename)
            else:
                safe_makedirs(root)
                for idx, link in enumerate(links):
                    (link,subtitle) = safe_unpack(link, subtitle)
                    link = safe_url(self.url, link)
                    add_job(plugin=self.plugin, path=link, dest=root, subtitle=subtitle, index=idx+1)
        if not links:
            if self.folder:
                ERRORS_ENCOUNTERED = True
                print("No links found at %s, please check URL and try again." % self.url)
            else:
                ERRORS_ENCOUNTERED = True
                print("No links found! Please check URL and try again.")
                print("Sites occasionally change their markup. Check if this tool has an update.")
                print(" - https://github.com/regosen/gallery_get")
                print(" - pip install gallery_get --update")
        return root
    def run(self):
        """Fetch the gallery page and queue all downloads.

        Returns the destination root on success, False on any failure.
        """
        global QUEUE
        if not self.url:
            print("Nothing to do!")
            return False
        ### FIND MATCHING PLUGIN (BASED ON URL)
        if self.plugin == None:
            print("Couldn't access gallery page! Try saving page(s) locally and use local path instead.")
            return False
        elif (not self.allow_generic) and (self.plugin.identifier == "generic"):
            return False
        else:
            print("Using %s plugin..." % self.plugin.debugname)
        ### TRY OPENING URL
        try:
            # Don't use urlopen_text here. We want to capture when the data is in bytes, and treat as image
            if self.plugin.needs_javascript:
                source = urlopen_js(self.url)
                page = unicode_safe(source)
            else:
                data = urlopen_safe(self.url)
                time.sleep(self.plugin.page_load_time)
                page = unicode_safe(data.read())
        except Exception as error:
            if ("certificate verify failed" in str(error)):
                print("ERROR: Python doesn't have SSL certificates installed, can't access " + self.url)
                print("Please run 'Install Certificates.command' from your Python installation directory.")
                return False
            elif (self.folder != DEST_ROOT) and ("." in urlparse(self.url).path):
                # this could be a direct image
                return download_image(self.url, self.folder)
            else:
                print("Skipping inaccessible link (%s): %s" % (self.url, error))
                return False
        ### BEGIN PROCESSING
        (root, subtitle) = self.get_root_and_subtitle(page)
        return self.queue_jobs(page, root, subtitle)
def run_wrapped(myurl, dest, titleAsFolder=False, cacheDest=True, flushJobs=True, allowGenericPlugin=True):
    """Run a gallery scrape with top-level exception reporting.

    Optionally persists `dest` as the default destination and waits for
    all downloads to finish.  Returns the destination root or False.
    """
    global DEST_ROOT, PARAMS_DEBUG, ERRORS_ENCOUNTERED
    PARAMS_DEBUG = [myurl, dest, titleAsFolder]
    try:
        if cacheDest and dest:
            safeCacheDestination(dest)
            DEST_ROOT = unicode_safe(dest)
        root = GalleryGet(myurl, dest or DEST_ROOT, titleAsFolder, allowGenericPlugin).run()
        if flushJobs:
            # block until worker threads drain the queue
            flush_jobs()
        return root
    except:
        # catch-all so a crash produces the bug-report notice instead of a bare traceback
        print('\n' + '-'*60)
        traceback.print_exc(file=sys.stdout)
        print("Using params: %s" % PARAMS_DEBUG)
        print('-'*60 + '\n')
        print(EXCEPTION_NOTICE)
        ERRORS_ENCOUNTERED = True
        return False
def run_prompted():
    """Interactively prompt for a URL and destination, then run the scrape."""
    global DEST_ROOT
    myurl = str_input("Input URL: ").strip()
    if not myurl:
        print("Nothing to do!")
        return False
    dest = str_input("Destination (%s): " % encode_safe(DEST_ROOT)).strip()
    if not dest:
        dest = DEST_ROOT  # empty answer keeps the cached destination
    return run_wrapped(myurl, dest)
def run(myurl="", dest=""):
    """Public entry point: prompt interactively when no URL is supplied."""
    if myurl:
        return run_wrapped(myurl, dest)
    return run_prompted()
def safeCacheDestination(dest):
    """Persist `dest` as the default destination for future runs.

    Best-effort: if writing the path as text fails (e.g. the platform's
    default encoding rejects it), fall back to writing raw UTF-8 bytes.
    Fix vs. original: the fallback opened the file in text mode and wrote
    bytes, which raises TypeError on Python 3; it now uses binary mode.
    File handles are also closed deterministically via `with`.
    """
    try:
        with open(DESTPATH_FILE, "w") as cache_file:
            cache_file.write(dest)
    except:
        with open(DESTPATH_FILE, "wb") as cache_file:
            cache_file.write(dest.encode("utf8"))
# Direct-launch detection: only run when this file itself was executed,
# not when imported (sys.argv[0] would then be the importing script).
cur_file = os.path.basename(str(__file__))
arg_file = sys.argv[0]
if arg_file and os.path.basename(arg_file) == cur_file:
    ### DIRECT LAUNCH (not import)
    if len(sys.argv) > 1:
        # use first parameter as url, second (if exists) as dest
        if len(sys.argv) > 2:
            DEST_ROOT = unicode_safe(sys.argv[2])
            safeCacheDestination(DEST_ROOT)
        run_wrapped(sys.argv[1], DEST_ROOT)
    else:
        # no arguments: fall back to interactive prompts
        run_prompted()
|
regosen/gallery_get
|
gallery_get.py
|
Python
|
mit
| 18,548
|
[
"VisIt"
] |
68bab0099ef8f0ae1eff3549dc4ddd23c9590dda413c61ceed354db4ad461499
|
#
# Copyright (C) 2011-2021 Greg Landrum and other RDKit contributors
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import base64
import copy
import warnings
from io import BytesIO
import IPython
from IPython.display import SVG
from rdkit import Chem
from rdkit.Chem import Draw, rdchem, rdChemReactions
from rdkit.Chem.Draw import rdMolDraw2D
if IPython.release.version < '0.11':
raise ImportError('this module requires at least v0.11 of IPython')
try:
import py3Dmol
_canUse3D = True
except ImportError:
_canUse3D = False
from PIL import Image
from PIL.PngImagePlugin import PngInfo
# Module-level notebook rendering configuration (mutable by users).
molSize = (450, 150)  # default 2D image size (width, height) in pixels
highlightSubstructs = True  # highlight atoms from the most recent substructure match
kekulizeStructures = True  # kekulize molecules before 2D drawing
highlightByReactant = False  # color reaction depictions by reactant
ipython_useSVG = False  # render SVG instead of PNG in notebooks
ipython_showProperties = True  # show molecule properties in the HTML repr
ipython_maxProperties = 10  # property-row cap in the HTML table (-1 = unlimited)
ipython_3d = False  # use py3Dmol for molecules that have 3D conformers
molSize_3d = (400, 400)  # viewer size for 3D rendering
drawing_type_3d = 'stick' # default drawing type for 3d structures
bgcolor_3d = '0xeeeeee'  # 3D viewer background color
drawOptions = rdMolDraw2D.MolDrawOptions()  # shared 2D drawing options
def addMolToView(mol, view, confId=-1, drawAs=None):
    """Add a single molecule to a py3Dmol view.

    Large molecules (or cartoon rendering) go through PDB format; smaller
    ones use a mol block.  `drawAs` defaults to the module-level
    `drawing_type_3d`.
    """
    if mol.GetNumAtoms() >= 999 or drawAs == 'cartoon':
        # py3DMol is happier with TER and MASTER records present
        pdb = Chem.MolToPDBBlock(mol, flavor=0x20 | 0x10)
        view.addModel(pdb, 'pdb')
    else:
        # py3Dmol does not currently support v3k mol files, so
        # we can only provide those with "smaller" molecules
        mb = Chem.MolToMolBlock(mol, confId=confId)
        view.addModel(mb, 'sdf')
    if drawAs is None:
        drawAs = drawing_type_3d
    view.setStyle({drawAs: {}})
def drawMol3D(m, view=None, confId=-1, drawAs=None, bgColor=None, size=None):
    """Render a molecule (or an iterable of molecules) in a py3Dmol view.

    Defaults for background color and size come from the module-level
    `bgcolor_3d` and `molSize_3d` settings.  Returns the result of
    `view.show()`.
    """
    if bgColor is None:
        bgColor = bgcolor_3d
    if size is None:
        size = molSize_3d
    if view is None:
        view = py3Dmol.view(width=size[0], height=size[1])
    view.removeAllModels()
    # accept either a single molecule or any iterable of molecules
    try:
        ms = iter(m)
        for m in ms:
            addMolToView(m, view, confId, drawAs)
    except TypeError:
        addMolToView(m, view, confId, drawAs)
    view.setBackgroundColor(bgColor)
    view.zoomTo()
    return view.show()
def _toJSON(mol):
    """For IPython notebook, renders 3D webGL objects."""
    # Only applies when 3D display is enabled and a 3D conformer exists.
    if not ipython_3d or not mol.GetNumConformers():
        return None
    conf = mol.GetConformer()
    if not conf.Is3D():
        return None
    res = drawMol3D(mol)
    if hasattr(res, 'data'):
        return res.data
    return ""
def _toHTML(mol):
    """HTML repr hook: image plus a table of the molecule's properties.

    Falls back to 3D rendering when enabled, or a plain SVG when property
    display is off / the molecule has no properties.
    """
    if _canUse3D and ipython_3d and mol.GetNumConformers():
        return _toJSON(mol)
    props = mol.GetPropsAsDict()
    if not ipython_showProperties or not props:
        return _toSVG(mol)
    if mol.HasProp('_Name'):
        nm = mol.GetProp('_Name')
    else:
        nm = ''
    res = []
    if not ipython_useSVG:
        # embed the PNG as a base64 data URI
        png = Draw._moltoimg(mol, molSize, [], nm, returnPNG=True, drawOptions=drawOptions)
        png = base64.b64encode(png)
        res.append(f'<tr><td colspan=2 style="text-align:center"><image src="data:image/png;base64,{png.decode()}"></td></tr>')
    else:
        svg = Draw._moltoSVG(mol, molSize, [], nm, kekulize=kekulizeStructures, drawOptions=drawOptions)
        res.append(f'<tr><td colspan=2 style="text-align:center">{svg}</td></tr>')
    for i,(pn, pv) in enumerate(props.items()):
        # honor the property cap, noting the truncation in the output
        if ipython_maxProperties >= 0 and i >= ipython_maxProperties:
            res.append('<tr><td colspan=2 style="text-align:center">Property list truncated.<br />Increase IPythonConsole.ipython_maxProperties (or set it to -1) to see more properties.</td></tr>')
            break
        res.append(
            f'<tr><th style="text-align:right">{pn}</th><td style="text-align:left">{pv}</td></tr>')
    res = "\n".join(res)
    return f'<table>{res}</table>'
def _toPNG(mol):
    """PNG repr hook: render `mol`, highlighting cached substructure atoms."""
    highlightAtoms = getattr(mol, '__sssAtoms', [])
    return Draw._moltoimg(mol, molSize, highlightAtoms, "", returnPNG=True,
                          kekulize=kekulizeStructures, drawOptions=drawOptions)
def _toSVG(mol):
    """SVG repr hook; returns None unless SVG output is enabled."""
    if not ipython_useSVG:
        return None
    highlightAtoms = getattr(mol, '__sssAtoms', [])
    return Draw._moltoSVG(mol, molSize, highlightAtoms, "",
                          kekulizeStructures, drawOptions=drawOptions)
def _toReactionPNG(rxn):
    """PNG repr hook for chemical reactions.

    Works on a deep copy so drawing cannot mutate the user's reaction.
    """
    rc = copy.deepcopy(rxn)
    return Draw.ReactionToImage(rc, subImgSize=(int(molSize[0] / 3), molSize[1]),
                                highlightByReactant=highlightByReactant, drawOptions=drawOptions,
                                returnPNG=True)
def _toReactionSVG(rxn):
    """SVG repr hook for chemical reactions; None unless SVG is enabled."""
    if not ipython_useSVG:
        return None
    # deep copy so drawing cannot mutate the user's reaction
    rc = copy.deepcopy(rxn)
    return Draw.ReactionToImage(rc, subImgSize=(int(molSize[0] / 3), molSize[1]), useSVG=True,
                                highlightByReactant=highlightByReactant, drawOptions=drawOptions)
def _toMolBundlePNG(bundle):
    """PNG repr hook for MolBundle: a grid image of the bundled molecules."""
    # Use the saved original if MolsToGridImage has been monkey-patched.
    renderer = _MolsToGridImageSaved if _MolsToGridImageSaved is not None else Draw.MolsToGridImage
    return renderer(bundle, subImgSize=molSize, drawOptions=drawOptions,
                    useSVG=False, returnPNG=True)
def _toMolBundleSVG(bundle):
    """SVG repr hook for MolBundle; None unless SVG output is enabled."""
    if not ipython_useSVG:
        return None
    # Use the saved original if MolsToGridImage has been monkey-patched.
    renderer = _MolsToGridImageSaved if _MolsToGridImageSaved is not None else Draw.MolsToGridImage
    return renderer(bundle, subImgSize=molSize, drawOptions=drawOptions, useSVG=True)
def _GetSubstructMatch(mol, query, *args, **kwargs):
    """Patched Mol.GetSubstructMatch that caches matched atoms on the
    molecule (as __sssAtoms) so the repr hooks can highlight them."""
    res = mol.__GetSubstructMatch(query, *args, **kwargs)
    if highlightSubstructs:
        mol.__sssAtoms = list(res)
    else:
        mol.__sssAtoms = []
    return res
_GetSubstructMatch.__doc__ = rdchem.Mol.GetSubstructMatch.__doc__
def _GetSubstructMatches(mol, query, *args, **kwargs):
    """Patched Mol.GetSubstructMatches that caches all matched atoms on
    the molecule (as __sssAtoms) so the repr hooks can highlight them."""
    res = mol.__GetSubstructMatches(query, *args, **kwargs)
    mol.__sssAtoms = []
    if highlightSubstructs:
        for entry in res:
            mol.__sssAtoms.extend(list(entry))
    return res
_GetSubstructMatches.__doc__ = rdchem.Mol.GetSubstructMatches.__doc__
# code for displaying PIL images directly,
def display_pil_image(img):
    """displayhook function for PIL Images, rendered as PNG"""
    # carry any metadata already attached to the image into the PNG
    metadata = PngInfo()
    for key, value in img.info.items():
        metadata.add_text(key, value)
    buffer = BytesIO()
    img.save(buffer, format='PNG', pnginfo=metadata)
    return buffer.getvalue()
# Saved original Draw.MolsToGridImage (set by InstallIPythonRenderer).
_MolsToGridImageSaved = None
from IPython import display
def ShowMols(mols, maxMols=50, **kwargs):
    """Notebook-friendly wrapper around Draw.MolsToGridImage.

    Truncates the molecule list (and parallel per-molecule kwargs) to
    `maxMols`, fills in module-level defaults, and wraps the result in
    the matching IPython display object.
    """
    global _MolsToGridImageSaved
    if 'useSVG' not in kwargs:
        kwargs['useSVG'] = ipython_useSVG
    if 'returnPNG' not in kwargs:
        kwargs['returnPNG'] = True
    # use the saved original if MolsToGridImage has been monkey-patched
    if _MolsToGridImageSaved is not None:
        fn = _MolsToGridImageSaved
    else:
        fn = Draw.MolsToGridImage
    if len(mols) > maxMols:
        warnings.warn(
            "Truncating the list of molecules to be displayed to %d. Change the maxMols value to display more."
            % (maxMols))
        mols = mols[:maxMols]
        # truncate per-molecule kwargs in lockstep with the molecule list
        for prop in ('legends', 'highlightAtoms', 'highlightBonds'):
            if prop in kwargs:
                kwargs[prop] = kwargs[prop][:maxMols]
    if not "drawOptions" in kwargs:
        kwargs["drawOptions"] = drawOptions
    res = fn(mols, **kwargs)
    if kwargs['useSVG']:
        return SVG(res)
    if kwargs['returnPNG']:
        return display.Image(data=res, format='png')
    return res
ShowMols.__doc__ = Draw.MolsToGridImage.__doc__
def _DrawBit(fn, *args, **kwargs):
    """Call a fingerprint-bit drawing function and wrap its output for
    notebook display (SVG object or PIL image, per ipython_useSVG)."""
    if 'useSVG' not in kwargs:
        kwargs['useSVG'] = ipython_useSVG
    res = fn(*args, **kwargs)
    if kwargs['useSVG']:
        return SVG(res)
    sio = BytesIO(res)
    return Image.open(sio)
def _DrawBits(fn, *args, **kwargs):
    """Multi-bit variant of _DrawBit.

    The original body was a line-for-line duplicate of _DrawBit; delegate
    to it so the wrapping logic lives in exactly one place.  The contract
    (same arguments, SVG-or-PIL return) is unchanged.
    """
    return _DrawBit(fn, *args, **kwargs)
# Saved original Draw.DrawMorganBit (set by InstallIPythonRenderer).
_DrawMorganBitSaved = None
def DrawMorganBit(mol, bitId, bitInfo, drawOptions=drawOptions, **kwargs):
    """Notebook wrapper for Draw.DrawMorganBit with display-object output."""
    global _DrawMorganBitSaved
    # use the saved original if Draw has been monkey-patched
    if _DrawMorganBitSaved is not None:
        fn = _DrawMorganBitSaved
    else:
        fn = Draw.DrawMorganBit
    return _DrawBit(fn, mol, bitId, bitInfo, drawOptions=drawOptions, **kwargs)
DrawMorganBit.__doc__ = Draw.DrawMorganBit.__doc__
# Saved original Draw.DrawMorganBits (set by InstallIPythonRenderer).
_DrawMorganBitsSaved = None
def DrawMorganBits(*args, drawOptions=drawOptions, **kwargs):
    """Notebook wrapper for Draw.DrawMorganBits with display-object output."""
    global _DrawMorganBitsSaved
    # use the saved original if Draw has been monkey-patched
    if _DrawMorganBitsSaved is not None:
        fn = _DrawMorganBitsSaved
    else:
        fn = Draw.DrawMorganBits
    return _DrawBit(fn, *args, drawOptions=drawOptions, **kwargs)
DrawMorganBits.__doc__ = Draw.DrawMorganBits.__doc__
# Saved original Draw.DrawRDKitBit (set by InstallIPythonRenderer).
_DrawRDKitBitSaved = None
def DrawRDKitBit(mol, bitId, bitInfo, drawOptions=drawOptions, **kwargs):
    """Notebook wrapper for Draw.DrawRDKitBit with display-object output."""
    global _DrawRDKitBitSaved
    # use the saved original if Draw has been monkey-patched
    if _DrawRDKitBitSaved is not None:
        fn = _DrawRDKitBitSaved
    else:
        fn = Draw.DrawRDKitBit
    return _DrawBit(fn, mol, bitId, bitInfo, drawOptions=drawOptions, **kwargs)
DrawRDKitBit.__doc__ = Draw.DrawRDKitBit.__doc__
# Saved original Draw.DrawRDKitBits (set by InstallIPythonRenderer).
_DrawRDKitBitsSaved = None
def DrawRDKitBits(*args, drawOptions=drawOptions, **kwargs):
    """Notebook wrapper for Draw.DrawRDKitBits with display-object output."""
    global _DrawRDKitBitsSaved
    # use the saved original if Draw has been monkey-patched
    if _DrawRDKitBitsSaved is not None:
        fn = _DrawRDKitBitsSaved
    else:
        fn = Draw.DrawRDKitBits
    return _DrawBit(fn, *args, drawOptions=drawOptions, **kwargs)
DrawRDKitBits.__doc__ = Draw.DrawRDKitBits.__doc__
# Tracks whether InstallIPythonRenderer has already patched everything.
_rendererInstalled = False
def EnableSubstructMatchRendering():
    """Patch Mol substructure-search methods so matches get highlighted.

    Originals are stashed on the class so DisableSubstructMatchRendering
    can restore them; idempotent via the hasattr checks.
    """
    if not hasattr(rdchem.Mol, '__GetSubstructMatch'):
        rdchem.Mol.__GetSubstructMatch = rdchem.Mol.GetSubstructMatch
    rdchem.Mol.GetSubstructMatch = _GetSubstructMatch
    if not hasattr(rdchem.Mol, '__GetSubstructMatches'):
        rdchem.Mol.__GetSubstructMatches = rdchem.Mol.GetSubstructMatches
    rdchem.Mol.GetSubstructMatches = _GetSubstructMatches
# (class, attribute) pairs added by InstallIPythonRenderer, removed on uninstall.
_methodsToDelete = []
def InstallIPythonRenderer():
    """Install all notebook repr hooks and Draw monkey-patches.

    Idempotent: a second call is a no-op.  Everything installed here is
    reverted by UninstallIPythonRenderer.
    """
    global _MolsToGridImageSaved, _DrawRDKitBitSaved, _DrawRDKitBitsSaved, _DrawMorganBitSaved, _DrawMorganBitsSaved
    global _rendererInstalled
    if _rendererInstalled:
        return
    # repr hooks for single molecules
    rdchem.Mol._repr_png_ = _toPNG
    rdchem.Mol._repr_svg_ = _toSVG
    _methodsToDelete.append((rdchem.Mol, '_repr_png_'))
    _methodsToDelete.append((rdchem.Mol, '_repr_svg_'))
    rdchem.Mol._repr_html_ = _toHTML
    _methodsToDelete.append((rdchem.Mol, '_repr_html_'))
    # repr hooks for reactions
    rdChemReactions.ChemicalReaction._repr_png_ = _toReactionPNG
    rdChemReactions.ChemicalReaction._repr_svg_ = _toReactionSVG
    _methodsToDelete.append((rdChemReactions.ChemicalReaction, '_repr_png_'))
    _methodsToDelete.append((rdChemReactions.ChemicalReaction, '_repr_svg_'))
    # repr hooks for molecule bundles
    rdchem.MolBundle._repr_png_ = _toMolBundlePNG
    rdchem.MolBundle._repr_svg_ = _toMolBundleSVG
    _methodsToDelete.append((rdchem.MolBundle, '_repr_png_'))
    _methodsToDelete.append((rdchem.MolBundle, '_repr_svg_'))
    EnableSubstructMatchRendering()
    # PIL images render inline as PNG
    Image.Image._repr_png_ = display_pil_image
    _methodsToDelete.append((Image.Image, '_repr_png_'))
    # swap Draw helpers for their notebook-aware wrappers, saving originals
    _MolsToGridImageSaved = Draw.MolsToGridImage
    Draw.MolsToGridImage = ShowMols
    _DrawRDKitBitSaved = Draw.DrawRDKitBit
    Draw.DrawRDKitBit = DrawRDKitBit
    _DrawRDKitBitsSaved = Draw.DrawRDKitBits
    Draw.DrawRDKitBits = DrawRDKitBits
    _DrawMorganBitSaved = Draw.DrawMorganBit
    Draw.DrawMorganBit = DrawMorganBit
    _DrawMorganBitsSaved = Draw.DrawMorganBits
    Draw.DrawMorganBits = DrawMorganBits
    # default Mol.Debug to stderr-style output (useStdout=False)
    rdchem.Mol.__DebugMol = rdchem.Mol.Debug
    rdchem.Mol.Debug = lambda self, useStdout=False: self.__DebugMol(useStdout=useStdout)
    _rendererInstalled = True
# install on import, matching historical module behavior
InstallIPythonRenderer()
def DisableSubstructMatchRendering():
    """Restore the original Mol substructure-search methods, if patched."""
    if hasattr(rdchem.Mol, '__GetSubstructMatch'):
        rdchem.Mol.GetSubstructMatch = rdchem.Mol.__GetSubstructMatch
        del rdchem.Mol.__GetSubstructMatch
    if hasattr(rdchem.Mol, '__GetSubstructMatches'):
        rdchem.Mol.GetSubstructMatches = rdchem.Mol.__GetSubstructMatches
        del rdchem.Mol.__GetSubstructMatches
def UninstallIPythonRenderer():
    """Undo everything InstallIPythonRenderer did.

    Removes the repr hooks, restores the saved Draw helpers, and restores
    Mol.Debug.  Safe to call when nothing is installed.
    """
    global _MolsToGridImageSaved, _DrawRDKitBitSaved, _DrawMorganBitSaved, _DrawMorganBitsSaved
    global _rendererInstalled, _methodsToDelete
    if not _rendererInstalled:
        return
    for cls, attr in _methodsToDelete:
        delattr(cls, attr)
    _methodsToDelete = []
    DisableSubstructMatchRendering()
    # restore the saved Draw helpers (None means they were never patched)
    if _MolsToGridImageSaved is not None:
        Draw.MolsToGridImage = _MolsToGridImageSaved
    if _DrawRDKitBitSaved is not None:
        Draw.DrawRDKitBit = _DrawRDKitBitSaved
    if _DrawRDKitBitsSaved is not None:
        Draw.DrawRDKitBits = _DrawRDKitBitsSaved
    if _DrawMorganBitSaved is not None:
        Draw.DrawMorganBit = _DrawMorganBitSaved
    if _DrawMorganBitsSaved is not None:
        Draw.DrawMorganBits = _DrawMorganBitsSaved
    if hasattr(rdchem.Mol, '__DebugMol'):
        rdchem.Mol.Debug = rdchem.Mol.__DebugMol
        del rdchem.Mol.__DebugMol
    _rendererInstalled = False
|
bp-kelley/rdkit
|
rdkit/Chem/Draw/IPythonConsole.py
|
Python
|
bsd-3-clause
| 12,511
|
[
"RDKit"
] |
47e9fa45c983765240251295bb4e99117de2c912e918766d8d77407ff3bb5aa3
|
"""
Converts a dict containing a mongo query to a lambda. Doesn't support all the mongo
query $keywords, probably differs the result in subtle ways.
It only supports querying flat structures: dicts with no lists or other dicts in
them (aka subdocuments).
"""
from __future__ import absolute_import
from __future__ import print_function
import linecache
import re
import sys
import weakref
import zlib
from abc import ABCMeta
from abc import abstractmethod
from warnings import warn
from six import reraise
from six import with_metaclass
__all__ = "InvalidQuery", "to_string", "to_func"
__version__ = "0.4.1"
NoneType = type(None)
class InvalidQuery(Exception):
    """Raised when a mongo-style query dict is malformed or unsupported."""
def validated_method(validator_name, func):
    """Wrap `func` so the instance's named validator runs on `value` first.

    When the instance lacks the validator, a warning is emitted and the
    raw value is passed through unchanged.
    """
    def validated_method_wrapper(self, value, *args, **kwargs):
        validator = getattr(self, validator_name, None)
        if validator is None:
            warn("Missing validator %s in %s" % (validator_name, type(self).__name__))
        else:
            value = validator(value, *args, **kwargs)
        return func(self, value, *args, **kwargs)
    return validated_method_wrapper
def validator_metaclass(base=type):
    """Build a metaclass that pairs each visit_* method with validate_*.

    For every callable class attribute named ``visit_<op>``, the created
    class gets it wrapped via validated_method so the corresponding
    ``validate_<op>`` runs on the operand first.  `base` lets callers
    layer this on another metaclass (e.g. ABCMeta).
    """
    return type(
        base.__name__ + "WithValidatorMeta",
        (base, ),
        # __new__ rewrites the namespace: visit_* callables get wrapped,
        # everything else passes through untouched.
        {'__new__': lambda mcls, name, bases, namespace: base.__new__(mcls, name, bases, {
            name: (
                validated_method('validate_'+name[6:], func)
                if callable(func) and name.startswith('visit_')
                else func
            )
            for name, func in namespace.items()
        })}
    )
def require(*types):
    """Build a validator that checks its value is an instance of `types`.

    The returned callable passes valid values through unchanged and
    raises InvalidQuery otherwise.
    """
    def require_(value, *args, **kwargs):
        if isinstance(value, types):
            return value
        expected = ', '.join('None' if t is NoneType else t.__name__ for t in types)
        raise InvalidQuery('Invalid query part %r. Expected one of: %s.' % (value, expected))
    return require_
# Type-validation shortcuts, bound per Python major version
# (Python 2 has separate str/unicode and int/long types).
if sys.version_info[0] == 3:
    require_string = require(str)
    require_integer = require(int)
    require_value = require(int, float, str, bool, NoneType)
else:
    require_string = require(str, unicode)  # noqa
    require_integer = require(int, long)  # noqa
    require_value = require(int, long, float, str, unicode, bool, NoneType)  # noqa
# Unique sentinels used by the visitors:
Skip = object()      # marks a query part that produces no output
Stripped = object()  # marks a $regex/$options pair already consumed
Missing = object()   # distinguishes "absent key" from a None value
class BaseVisitor(with_metaclass(validator_metaclass(base=ABCMeta))):
validate_gt = validate_gte = validate_lt = validate_lte = validate_ne = validate_eq = staticmethod(require_value)
validate_query = staticmethod(require(dict))
validate_exists = staticmethod(require(object))
validate_and = validate_or = staticmethod(require(list, tuple))
validate_all = validate_in = validate_nin = staticmethod(require(set, list, tuple, frozenset))
validate_size = staticmethod(require_integer)
def validate_mod(self, value, field_name, context):
self.validate_and(value)
if len(value) != 2:
raise InvalidQuery('Invalid query part %r. You must have two items: divisor and remainder.' % value)
return value
def validate_regex(self, value, field_name, context, acceptable_options=('s', 'x', 'm', 'i')):
options = context.get('$options', Missing)
regex = context.get('$regex', Missing)
if Stripped in (options, regex):
return Stripped
extra_keys = set(i for i in context if not i.startswith('$'))
if extra_keys:
raise InvalidQuery('Invalid query part %r. You can only have `$options` with `$regex`.' % ', '.join(
repr(k) for k in extra_keys
))
if regex is Missing:
raise InvalidQuery('Invalid query part %r. Cannot have $options without $regex.' % context)
require_string(regex)
raw_options = 0
if options is not Missing:
require_string(options)
for opt in options:
if opt not in acceptable_options:
raise InvalidQuery(
"Invalid query part %r. Unsupported regex option %r. Only %s are supported !" % (
options, opt, ', '.join(acceptable_options)
)
)
raw_options |= getattr(re, opt.upper())
try:
re.compile(regex, raw_options)
except re.error as exc:
reraise(InvalidQuery, InvalidQuery("Invalid regular expression %r: %s" % (value, exc)), sys.exc_info()[2])
context['$regex'] = Stripped
if '$options' in context:
context['$options'] = Stripped
return regex, raw_options
validate_options = validate_regex
@abstractmethod
def visit_eq(self, value, field_name, context):
pass # pragma: no cover
@abstractmethod
def render_and(self, parts):
pass # pragma: no cover
def visit(self, query):
return self.visit_query(query)
def visit_query(self, query, field_name=None, context=None):
return self.render_and([
part
for part in self.handle_query(query, field_name)
if part is not Skip
], field_name, context)
    def handle_query(self, query, field_name, context=None):
        """Dispatch every ``{name: value}`` pair of *query*, yielding rendered parts.

        ``$op`` keys go to the matching ``visit_<op>`` method (InvalidQuery
        for unknown operators), dict values recurse as sub-queries on their
        field, and anything else becomes an equality test.  Operates on a
        copy because visitors may mutate the mapping (e.g. $regex stripping).
        """
        query = query.copy()
        for name, value in query.items():
            if name.startswith('$'):
                opname = name[1:]
                handler = 'visit_' + opname
                if hasattr(self, handler):
                    handler = getattr(self, handler)
                    yield handler(value, field_name, query)
                else:
                    raise InvalidQuery("%s doesn't support operator %r" % (type(self).__name__, name))
            elif isinstance(value, dict):
                yield self.visit_query(value, name, query)
            else:
                yield self.visit_eq(value, name, query)
class ExprVisitor(BaseVisitor):
    """Render a MongoDB-style query as a Python expression over a mapping.

    Fields are accessed by direct indexing (``row['field']``), so evaluating
    the expression against an object that lacks a field raises ``KeyError``
    (strict semantics; see LaxExprVisitor for the forgiving variant).  When
    *closure* is a dict, set constants and compiled regexes are stored there
    under generated ``varN`` names instead of being inlined in the source.
    """
    def __init__(self, closure, object_name):
        # closure: dict collecting named constants, or None to inline them
        self.closure = closure
        # object_name: the variable name of the mapping in the generated code
        self.object_name = object_name
    # simple comparison operators map one-to-one onto Python comparisons
    def visit_gt(self, value, field_name, context):
        return "%s[%r] > %r" % (self.object_name, field_name, value)
    def visit_gte(self, value, field_name, context):
        return "%s[%r] >= %r" % (self.object_name, field_name, value)
    def visit_lt(self, value, field_name, context):
        return "%s[%r] < %r" % (self.object_name, field_name, value)
    def visit_lte(self, value, field_name, context):
        return "%s[%r] <= %r" % (self.object_name, field_name, value)
    def visit_ne(self, value, field_name, context):
        return "%s[%r] != %r" % (self.object_name, field_name, value)
    def visit_eq(self, value, field_name, context):
        return "%s[%r] == %r" % (self.object_name, field_name, value)
    def visit_in(self, value, field_name, context, operator='in'):
        """$in: membership in a set literal (inlined or closure-hoisted)."""
        if self.closure is None:
            var_name = "{%s}" % ", ".join(repr(i) for i in value)
        else:
            var_name = "var%s" % len(self.closure)
            self.closure[var_name] = "{%s}" % ", ".join(repr(i) for i in value)
        return "%s[%r] %s %s" % (self.object_name, field_name, operator, var_name)
    def visit_nin(self, value, field_name, context):
        # $nin is $in with the operator negated
        return self.visit_in(value, field_name, context, 'not in')
    def visit_and(self, parts, field_name, context, operator=' and '):
        return self.render_and([self.visit_query(part, field_name) for part in parts], field_name, context, operator)
    def visit_or(self, parts, field_name, context):
        # $or reuses the $and machinery with a different joiner
        return self.visit_and(parts, field_name, context, ' or ')
    def render_and(self, parts, field_name, context, operator=' and '):
        """Join rendered parts; parenthesize only when there are several."""
        multiple = len(parts) > 1
        return operator.join("(%s)" % part if multiple else part for part in parts) or 'True'
    def visit_regex(self, value, field_name, context):
        """$regex: value is the (pattern, flags) pair produced by validate_regex."""
        if value is Stripped:
            return Skip
        else:
            regex, options = value
            if self.closure is None:
                return "re.search(%r, %s[%r], %d)" % (regex, self.object_name, field_name, options)
            else:
                # hoist the compiled pattern into the closure
                var_name = "var%s" % len(self.closure)
                self.closure[var_name] = "re.compile(%r, %d)" % (regex, options)
                return '%s.search(%s[%r])' % (var_name, self.object_name, field_name)
    # $options was folded into the $regex rendering
    visit_options = visit_regex
    def visit_size(self, value, field_name, context):
        return "len(%s[%r]) == %r" % (self.object_name, field_name, value)
    def visit_all(self, value, field_name, context):
        """$all: the field's items must be a superset of the given values."""
        if self.closure is None:
            return 'set(%s[%r]) >= {%s}' % (self.object_name, field_name, ', '.join(repr(i) for i in value))
        else:
            var_name = "var%s" % len(self.closure)
            self.closure[var_name] = "{%s}" % ', '.join(repr(i) for i in value)
            return 'set(%s[%r]) >= %s' % (self.object_name, field_name, var_name)
    def visit_mod(self, value, field_name, context):
        divisor, remainder = value
        return '%s[%r] %% %s == %s' % (self.object_name, field_name, divisor, remainder)
    def visit_exists(self, value, field_name, context):
        # $exists: plain (negated) containment test on the mapping
        return '%r %sin %s' % (
            field_name, '' if value else 'not ', self.object_name,
        )
class LaxNone(object):
    """Null object standing in for missing document fields in lax mode.

    ``mapping.get(key, LaxNone)`` yields this singleton when *key* is
    absent; every operation the generated lax expressions perform on it is
    defined so the predicate evaluates falsy instead of raising: all rich
    comparisons are False, it is empty (``len`` 0, empty iterator) and
    ``%`` is absorbing so ``$mod`` tests never match.  It is deliberately
    unhashable so it cannot silently become a dict key or set member.

    Note: the previous implementation used ``staticmethod(lambda: ...)`` /
    one-argument lambdas for the dunders, which raised TypeError as soon
    as any of them was invoked (special methods receive the instance as
    the first argument).  They are now ordinary methods.
    """
    def __repr__(self):
        return "LaxNone"
    __str__ = __repr__
    def __eq__(self, other):
        # every comparison against anything is False
        return False
    __lt__ = __le__ = __ne__ = __gt__ = __ge__ = __eq__
    def __len__(self):
        # a missing field has length 0 (so $size n matches only n == 0 never truthily)
        return 0
    def __iter__(self):
        return iter(())
    def __mod__(self, other):
        # absorbing for $mod: LaxNone % divisor stays LaxNone, never == remainder
        return LaxNone
    __hash__ = None
# replace the class with its singleton instance; the module exposes only this
LaxNone = LaxNone()
class LaxExprVisitor(BaseVisitor):
    """Render a MongoDB-style query as a forgiving Python expression.

    Unlike ExprVisitor, fields are read with ``.get(name, LaxNone)`` so a
    missing field never raises - LaxNone makes every test evaluate falsy
    instead.  When *closure* is a dict, set constants and compiled regexes
    are stored there under generated ``varN`` names instead of inlined.
    """
    def __init__(self, closure, object_name):
        # closure: dict collecting named constants, or None to inline them
        self.closure = closure
        # object_name: the variable name of the mapping in the generated code
        self.object_name = object_name
    # simple comparisons use .get(..., LaxNone) so missing fields compare False
    def visit_gt(self, value, field_name, context):
        return "%s.get(%r, LaxNone) > %r" % (self.object_name, field_name, value)
    def visit_gte(self, value, field_name, context):
        return "%s.get(%r, LaxNone) >= %r" % (self.object_name, field_name, value)
    def visit_lt(self, value, field_name, context):
        return "%s.get(%r, LaxNone) < %r" % (self.object_name, field_name, value)
    def visit_lte(self, value, field_name, context):
        return "%s.get(%r, LaxNone) <= %r" % (self.object_name, field_name, value)
    def visit_ne(self, value, field_name, context):
        return "%s.get(%r, LaxNone) != %r" % (self.object_name, field_name, value)
    def visit_eq(self, value, field_name, context):
        return "%s.get(%r, LaxNone) == %r" % (self.object_name, field_name, value)
    def visit_in(self, value, field_name, context, operator='in', juction='and'):
        """$in: field must exist AND be in the set ($nin: not exist OR not in).

        (``juction`` is a long-standing misspelling of "junction"; it is a
        keyword parameter name, so it is kept for compatibility.)
        """
        if self.closure is None:
            var_name = "{%s}" % ", ".join(repr(i) for i in value)
        else:
            var_name = "var%s" % len(self.closure)
            self.closure[var_name] = "{%s}" % ", ".join(repr(i) for i in value)
        return "%r %s %s %s %s.get(%r, LaxNone) %s %s" % (
            field_name, operator, self.object_name, juction, self.object_name, field_name, operator, var_name
        )
    def visit_nin(self, value, field_name, context):
        return self.visit_in(value, field_name, context, 'not in', 'or')
    def visit_and(self, parts, field_name, context, operator=' and '):
        return self.render_and([self.visit_query(part, field_name) for part in parts], field_name, context, operator)
    def visit_or(self, parts, field_name, context):
        # $or reuses the $and machinery with a different joiner
        return self.visit_and(parts, field_name, context, ' or ')
    def render_and(self, parts, field_name, context, operator=' and '):
        """Join rendered parts; parenthesize only when there are several."""
        multiple = len(parts) > 1
        return operator.join("(%s)" % part if multiple else part for part in parts) or 'True'
    def visit_regex(self, value, field_name, context):
        """$regex: value is the (pattern, flags) pair produced by validate_regex.

        A missing field is searched as '' so the expression never raises.
        """
        if value is Stripped:
            return Skip
        else:
            regex, options = value
            if self.closure is None:
                return "re.search(%r, %s.get(%r, ''), %d)" % (regex, self.object_name, field_name, options)
            else:
                # hoist the compiled pattern into the closure
                var_name = "var%s" % len(self.closure)
                self.closure[var_name] = "re.compile(%r, %d)" % (regex, options)
                return "%s.search(%s.get(%r, ''))" % (var_name, self.object_name, field_name)
    # $options was folded into the $regex rendering
    visit_options = visit_regex
    def visit_size(self, value, field_name, context):
        # len(LaxNone) == 0, so a missing field only matches $size 0
        return "len(%s.get(%r, LaxNone)) == %r" % (self.object_name, field_name, value)
    def visit_all(self, value, field_name, context):
        """$all: the field's items must be a superset of the given values."""
        if self.closure is None:
            return 'set(%s.get(%r, LaxNone)) >= {%s}' % (self.object_name, field_name, ', '.join(repr(i) for i in value))
        else:
            var_name = "var%s" % len(self.closure)
            self.closure[var_name] = "{%s}" % ', '.join(repr(i) for i in value)
            return 'set(%s.get(%r, LaxNone)) >= %s' % (self.object_name, field_name, var_name)
    def visit_mod(self, value, field_name, context):
        # LaxNone % divisor is LaxNone, which never equals the remainder
        divisor, remainder = value
        return '%s.get(%r, LaxNone) %% %s == %s' % (self.object_name, field_name, divisor, remainder)
    def visit_exists(self, value, field_name, context):
        # $exists: plain (negated) containment test on the mapping
        return '%r %sin %s' % (
            field_name, '' if value else 'not ', self.object_name,
        )
def to_string(query, closure=None, object_name='row', lax=False):
    """Render *query* (a MongoDB-style dict) as a Python expression string.

    *closure*, when a dict, collects hoisted constants; *object_name* is the
    variable the expression indexes; *lax* selects the forgiving visitor.
    """
    visitor_cls = LaxExprVisitor if lax else ExprVisitor
    visitor = visitor_cls(closure, object_name)
    return visitor.visit(query)
def to_func(query, use_arguments=True, lax=False):
    """Compile *query* into a predicate function ``func(item) -> bool``.

    When *use_arguments* is true, hoisted constants (pre-built sets and
    compiled regexes) are bound as default arguments of the generated
    lambda; otherwise they are inlined in its source.  The source is
    registered in ``linecache`` under a synthetic filename so tracebacks
    show it, and a weakref callback evicts that entry once the function
    is garbage-collected.  The function carries ``.query`` and ``.source``
    attributes for introspection.
    """
    closure = {} if use_arguments else None
    as_string = to_string(query, closure, object_name='item', lax=lax)
    # build a lambda whose keyword defaults carry the closure constants
    as_code = "lambda item%s: (%s) # compiled from %r" % (
        ', ' + ', '.join('%s=%s' % (var_name, value) for var_name, value in closure.items()) if closure else '',
        as_string,
        query
    )
    # stable pseudo-filename keyed on the expression text (for linecache)
    filename = "<query-function-%x>" % zlib.adler32(as_string.encode('utf8'))
    func = eval(compile(as_code, filename, 'eval'))
    linecache.cache[filename] = len(as_code), None, [as_code], filename
    func.query = query
    func.source = as_code
    # evict the linecache entry when the function itself is collected
    func.cleanup = weakref.ref(func, lambda _, filename=filename: linecache.cache.pop(filename, None))
    return func
|
ionelmc/python-mongoql-conv
|
src/mongoql_conv/__init__.py
|
Python
|
bsd-2-clause
| 14,173
|
[
"VisIt"
] |
35e4115571457c2b1b996b679d57ea90b799bda1c38077e8626d0fcbc16ea677
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDirichletmultinomial(RPackage):
    """Dirichlet-Multinomial Mixture Model Machine Learning for Microbiome
    Data.

    Dirichlet-multinomial mixture models can be used to describe variability
    in microbial metagenomic data. This package is an interface to code
    originally made available by Holmes, Harris, and Quince, 2012, PLoS ONE
    7(2): 1-15, as discussed further in the man page for this package,
    ?DirichletMultinomial."""

    # Bioconductor package page and its git mirror (sources fetched by commit)
    homepage = "https://bioconductor.org/packages/DirichletMultinomial"
    git = "https://git.bioconductor.org/packages/DirichletMultinomial.git"

    # each version pins an exact Bioconductor release commit
    version('1.26.0', commit='7daa84948020811bb8a27d2e633fccfdcdd1018f')
    version('1.24.1', commit='50195d9b1986852da29100e77f6f09df5d6e2a35')
    version('1.22.0', commit='5864f4298105d12f345f27df77ad13bae4061ca5')
    version('1.20.0', commit='251529f301da1482551142240aeb6baf8dab2272')
    version('1.18.0', commit='81ccc8d83b8ef84f5d3e877bc0a04233a0f63c51')

    # R package dependencies, needed both to build and to run
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    # native GNU Scientific Library used by the compiled code
    depends_on('gsl')
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-dirichletmultinomial/package.py
|
Python
|
lgpl-2.1
| 1,391
|
[
"Bioconductor"
] |
4d57a8bf9811ddc1f80b83a1e32a913186dcfe50ea78fc4c974038226412e508
|
#!/usr/bin/python
"""
TCP Communications Module
"""
import asyncore
import socket
import errno
import cPickle as pickle
from time import time as _time, sleep as _sleep
from StringIO import StringIO
from .debugging import ModuleLogger, DebugContents, bacpypes_debugging
from .core import deferred
from .task import FunctionTask, OneShotFunction
from .comm import PDU, Client, Server
from .comm import ServiceAccessPoint, ApplicationServiceElement
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
REBIND_SLEEP_INTERVAL = 2.0
CONNECT_TIMEOUT = 30.0
#
# PickleActorMixIn
#
@bacpypes_debugging
class PickleActorMixIn:
    """Mix-in for TCP actors that pickles PDU payloads going downstream and
    un-pickles (possibly several) messages from the upstream byte stream.

    Because TCP is a stream, a received chunk may end with a partial
    pickle; the incomplete tail is kept in ``pickleBuffer`` until more
    data arrives.
    """
    def __init__(self, *args):
        if _debug: PickleActorMixIn._debug("__init__ %r", args)
        super(PickleActorMixIn, self).__init__(*args)
        # keep an upstream buffer
        self.pickleBuffer = ''
    def indication(self, pdu):
        """Downstream: pickle the payload, then continue down the stack."""
        if _debug: PickleActorMixIn._debug("indication %r", pdu)
        # pickle the data
        pdu.pduData = pickle.dumps(pdu.pduData)
        # continue as usual
        super(PickleActorMixIn, self).indication(pdu)
    def response(self, pdu):
        """Upstream: accumulate bytes and emit one PDU per complete pickle."""
        if _debug: PickleActorMixIn._debug("response %r", pdu)
        # add the data to our buffer
        self.pickleBuffer += pdu.pduData
        # build a file-like object around the buffer
        strm = StringIO(self.pickleBuffer)
        pos = 0
        while (pos < strm.len):
            try:
                # try to load something
                msg = pickle.load(strm)
            except:
                # incomplete (or corrupt) pickle - wait for more data
                break
            # got a message; copy the transport metadata from the carrier PDU
            rpdu = PDU(msg)
            rpdu.update(pdu)
            super(PickleActorMixIn, self).response(rpdu)
            # see where we are
            pos = strm.tell()
        # save anything left over, if there is any
        if (pos < strm.len):
            self.pickleBuffer = self.pickleBuffer[pos:]
        else:
            self.pickleBuffer = ''
#
# TCPClient
#
# This class is a mapping between the client/server pattern and the
# socket API. The ctor is given the address to connect as a TCP
# client. Because objects of this class sit at the bottom of a
# protocol stack they are accessed as servers.
#
@bacpypes_debugging
class TCPClient(asyncore.dispatcher):
    """Non-blocking TCP client mapped onto the client/server pattern.

    The constructor starts a connect to *peer*; because this object sits at
    the bottom of a protocol stack it is accessed as a server: downstream
    PDUs (indication) are queued in ``self.request`` and drained by the
    asyncore loop, received bytes are sent upstream via ``response``.
    """
    _connect_timeout = CONNECT_TIMEOUT
    def __init__(self, peer):
        if _debug: TCPClient._debug("__init__ %r", peer)
        asyncore.dispatcher.__init__(self)
        # ask the dispatcher for a socket
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # make sure the connection attempt is non-blocking
        self.socket.setblocking(0)
        if _debug: TCPClient._debug("    - non-blocking")
        # save the peer
        self.peer = peer
        self.connected = False
        # create a request buffer
        self.request = b''
        # try to connect; connect_ex returns an errno instead of raising
        try:
            rslt = self.socket.connect_ex(peer)
            if (rslt == 0):
                if _debug: TCPClient._debug("    - connected")
                self.connected = True
            elif (rslt == errno.EINPROGRESS):
                # normal for a non-blocking connect, completion comes later
                if _debug: TCPClient._debug("    - in progress")
            elif (rslt == errno.ECONNREFUSED):
                if _debug: TCPClient._debug("    - connection refused")
                self.handle_error(rslt)
            else:
                if _debug: TCPClient._debug("    - connect_ex: %r", rslt)
        except socket.error as err:
            if _debug: TCPClient._debug("    - connect socket error: %r", err)
            # pass along to a handler
            self.handle_error(err)
    def handle_accept(self):
        # clients never accept; present only to satisfy the dispatcher API
        if _debug: TCPClient._debug("handle_accept")
    def handle_connect(self):
        """Called when the non-blocking connect completes."""
        if _debug: TCPClient._debug("handle_connect")
        self.connected = True
    def handle_connect_event(self):
        """Dispatcher hook: inspect SO_ERROR before declaring the connect done."""
        if _debug: TCPClient._debug("handle_connect_event")
        # there might be an error
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if _debug: TCPClient._debug("    - err: %r", err)
        # check for connection refused
        if (err == 0):
            if _debug: TCPClient._debug("    - no error")
            self.connected = True
        elif (err == errno.ECONNREFUSED):
            if _debug: TCPClient._debug("    - connection to %r refused", self.peer)
            self.handle_error(socket.error(errno.ECONNREFUSED, "connection refused"))
            return
        # pass along
        asyncore.dispatcher.handle_connect_event(self)
    def readable(self):
        # only poll for reads once connected
        return self.connected
    def handle_read(self):
        """Socket is readable: receive a chunk and push it upstream."""
        if _debug: TCPClient._debug("handle_read")
        try:
            msg = self.recv(65536)
            if _debug: TCPClient._debug("    - received %d octets", len(msg))
            # no socket means it was closed
            if not self.socket:
                if _debug: TCPClient._debug("    - socket was closed")
            else:
                # send the data upstream (deferred so it runs outside the poll)
                deferred(self.response, PDU(msg))
        except socket.error as err:
            if (err.args[0] == errno.ECONNREFUSED):
                if _debug: TCPClient._debug("    - connection to %r refused", self.peer)
            else:
                if _debug: TCPClient._debug("    - recv socket error: %r", err)
            # pass along to a handler
            self.handle_error(err)
    def writable(self):
        # while not connected, stay writable so connect completion is detected
        if not self.connected:
            return True
        return (len(self.request) != 0)
    def handle_write(self):
        """Socket is writable: drain as much of the request buffer as possible."""
        if _debug: TCPClient._debug("handle_write")
        try:
            sent = self.send(self.request)
            if _debug: TCPClient._debug("    - sent %d octets, %d remaining", sent, len(self.request) - sent)
            self.request = self.request[sent:]
        except socket.error as err:
            if (err.args[0] == errno.EPIPE):
                # peer went away; nothing more to do on this socket
                if _debug: TCPClient._debug("    - broken pipe to %r", self.peer)
                return
            elif (err.args[0] == errno.ECONNREFUSED):
                if _debug: TCPClient._debug("    - connection to %r refused", self.peer)
            else:
                if _debug: TCPClient._debug("    - send socket error: %s", err)
            # pass along to a handler
            self.handle_error(err)
    def handle_write_event(self):
        """Dispatcher hook: map SO_ERROR codes to socket.error before writing."""
        if _debug: TCPClient._debug("handle_write_event")
        # there might be an error
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if _debug: TCPClient._debug("    - err: %r", err)
        # check for connection refused
        if err == 0:
            if not self.connected:
                if _debug: TCPClient._debug("    - connected")
                self.handle_connect()
        else:
            if _debug: TCPClient._debug("    - peer: %r", self.peer)
            if (err == errno.ECONNREFUSED):
                socket_error = socket.error(err, "connection refused")
            elif (err == errno.ETIMEDOUT):
                socket_error = socket.error(err, "timed out")
            elif (err == errno.EHOSTUNREACH):
                socket_error = socket.error(err, "host unreachable")
            else:
                socket_error = socket.error(err, "other unknown: %r" % (err,))
            if _debug: TCPClient._debug("    - socket_error: %r", socket_error)
            self.handle_error(socket_error)
            return
        # pass along
        asyncore.dispatcher.handle_write_event(self)
    def handle_close(self):
        """Close the socket and mark the object dead for the other handlers."""
        if _debug: TCPClient._debug("handle_close")
        # close the socket
        self.close()
        # no longer connected
        self.connected = False
        # make sure other routines know the socket is closed
        self.socket = None
    def handle_error(self, error=None):
        """Trap for TCPClient errors, otherwise continue."""
        if _debug: TCPClient._debug("handle_error %r", error)
        # if there is no socket, it was closed
        if not self.socket:
            if _debug: TCPClient._debug("    - error already handled")
            return
        # core does not take parameters
        asyncore.dispatcher.handle_error(self)
    def indication(self, pdu):
        """Requests are queued for delivery."""
        if _debug: TCPClient._debug("indication %r", pdu)
        self.request += pdu.pduData
#
# TCPClientActor
#
# Actors are helper objects for a director. There is one actor for
# each connection.
#
@bacpypes_debugging
class TCPClientActor(TCPClient):
    """Helper object for a TCPClientDirector: one actor per connection.

    Layers connect/idle timeouts, graceful flushing, and director
    callbacks (add_actor/del_actor/actor_error) on top of TCPClient.
    """
    def __init__(self, director, peer):
        if _debug: TCPClientActor._debug("__init__ %r %r", director, peer)
        # no director yet, no connection error
        self.director = None
        self._connection_error = None
        # add a timer to abandon connection attempts that take too long
        self._connect_timeout = director.connect_timeout
        if self._connect_timeout:
            self.connect_timeout_task = FunctionTask(self.connect_timeout)
            self.connect_timeout_task.install_task(_time() + self._connect_timeout)
        else:
            self.connect_timeout_task = None
        # continue with initialization
        TCPClient.__init__(self, peer)
        # keep track of the director
        self.director = director
        # add a timer to close a connection with no traffic
        self._idle_timeout = director.idle_timeout
        if self._idle_timeout:
            self.idle_timeout_task = FunctionTask(self.idle_timeout)
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        else:
            self.idle_timeout_task = None
        # this may have a flush state
        self.flush_task = None
        # tell the director this is a new actor
        self.director.add_actor(self)
        # if there was a connection error during TCPClient.__init__ (while
        # self.director was still None), pass it to the director now
        if self._connection_error:
            if _debug: TCPClientActor._debug("    - had connection error")
            self.director.actor_error(self, self._connection_error)
    def handle_connect(self):
        """Connection established: cancel the pending connect timeout."""
        if _debug: TCPClientActor._debug("handle_connect")
        # see if we are already connected
        if self.connected:
            if _debug: TCPClientActor._debug("    - already connected")
            return
        # if the connection timeout is scheduled, suspend it
        if self.connect_timeout_task:
            if _debug: TCPClientActor._debug("    - canceling connection timeout")
            self.connect_timeout_task.suspend_task()
            self.connect_timeout_task = None
        # continue as expected
        TCPClient.handle_connect(self)
    def handle_error(self, error=None):
        """Trap for TCPClient errors, otherwise continue."""
        if _debug: TCPClientActor._debug("handle_error %r", error)
        # pass along to the director
        if error is not None:
            # this error may be during startup, before the director is saved
            if not self.director:
                self._connection_error = error
            else:
                self.director.actor_error(self, error)
        else:
            TCPClient.handle_error(self)
    def handle_close(self):
        """Cancel outstanding tasks, detach from the director, then close."""
        if _debug: TCPClientActor._debug("handle_close")
        # if there's a flush task, cancel it
        if self.flush_task:
            self.flush_task.suspend_task()
        # cancel the timers
        if self.connect_timeout_task:
            if _debug: TCPClientActor._debug("    - canceling connection timeout")
            self.connect_timeout_task.suspend_task()
            self.connect_timeout_task = None
        if self.idle_timeout_task:
            if _debug: TCPClientActor._debug("    - canceling idle timeout")
            self.idle_timeout_task.suspend_task()
            self.idle_timeout_task = None
        # tell the director this is gone
        self.director.del_actor(self)
        # pass the function along
        TCPClient.handle_close(self)
    def connect_timeout(self):
        """Connection attempt took too long - shut it down."""
        if _debug: TCPClientActor._debug("connect_timeout")
        # shut it down
        self.handle_close()
    def idle_timeout(self):
        """No traffic for the idle period - shut it down."""
        if _debug: TCPClientActor._debug("idle_timeout")
        # shut it down
        self.handle_close()
    def indication(self, pdu):
        """Downstream data: queue it and reset the idle timer."""
        if _debug: TCPClientActor._debug("indication %r", pdu)
        # additional downstream data is tossed while flushing
        if self.flush_task:
            # bug fix: this debug call previously named TCPServerActor
            # (copy/paste from the server-side actor)
            if _debug: TCPClientActor._debug("    - flushing")
            return
        # reschedule the timer
        if self.idle_timeout_task:
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        # continue as usual
        TCPClient.indication(self, pdu)
    def response(self, pdu):
        """Upstream data: tag the peer as source, reset the timer, forward."""
        if _debug: TCPClientActor._debug("response %r", pdu)
        # put the peer address in as the source
        pdu.pduSource = self.peer
        # reschedule the timer
        if self.idle_timeout_task:
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        # process this as a response from the director
        self.director.response(pdu)
    def flush(self):
        """Keep rescheduling until the outgoing buffer drains, then close."""
        if _debug: TCPClientActor._debug("flush")
        # clear out the old task
        self.flush_task = None
        # if the outgoing buffer has data, re-schedule another attempt
        if self.request:
            self.flush_task = OneShotFunction(self.flush)
            return
        # close up shop, all done
        self.handle_close()
#
# TCPPickleClientActor
#
class TCPPickleClientActor(PickleActorMixIn, TCPClientActor):
    """Client actor that (un)pickles PDU payloads via PickleActorMixIn."""
    pass
#
# TCPClientDirector
#
# A client director presents a connection pool as one virtual
# interface. If a request should be sent to an address and there
# is no connection already established for it, it will create one
# and maintain it. PDU's from TCP clients have no source address,
# so one is provided by the client actor.
#
@bacpypes_debugging
class TCPClientDirector(Server, ServiceAccessPoint, DebugContents):
    """Presents a pool of client connections as one virtual interface.

    When a PDU is directed to an address with no established connection,
    an actor is created on demand and maintained; actors report back via
    add_actor/del_actor/actor_error.  Optionally reconnects dropped
    connections after a per-address delay.
    """
    _debug_contents = ('connect_timeout', 'idle_timeout', 'actorClass', 'clients', 'reconnect')
    def __init__(self, connect_timeout=None, idle_timeout=None, actorClass=TCPClientActor, sid=None, sapID=None):
        if _debug:
            TCPClientDirector._debug("__init__ connect_timeout=%r idle_timeout=%r actorClass=%r sid=%r sapID=%r",
                connect_timeout, idle_timeout, actorClass, sid, sapID,
                )
        Server.__init__(self, sid)
        ServiceAccessPoint.__init__(self, sapID)
        # check the actor class
        if not issubclass(actorClass, TCPClientActor):
            raise TypeError("actorClass must be a subclass of TCPClientActor")
        self.actorClass = actorClass
        # save the timeout for actors
        self.connect_timeout = connect_timeout
        self.idle_timeout = idle_timeout
        # start with an empty client pool
        self.clients = {}
        # no clients automatically reconnecting (peer -> delay seconds)
        self.reconnect = {}
    def add_actor(self, actor):
        """Add an actor when a new one is connected."""
        if _debug: TCPClientDirector._debug("add_actor %r", actor)
        self.clients[actor.peer] = actor
        # tell the ASE there is a new client
        if self.serviceElement:
            self.sap_request(add_actor=actor)
    def del_actor(self, actor):
        """Remove an actor when the socket is closed."""
        if _debug: TCPClientDirector._debug("del_actor %r", actor)
        # delete the client
        del self.clients[actor.peer]
        # tell the ASE the client has gone away
        if self.serviceElement:
            self.sap_request(del_actor=actor)
        # see if it should be reconnected after the configured delay
        if actor.peer in self.reconnect:
            connect_task = FunctionTask(self.connect, actor.peer)
            connect_task.install_task(_time() + self.reconnect[actor.peer])
    def actor_error(self, actor, error):
        """Forward an actor's error to the service element, if any."""
        if _debug: TCPClientDirector._debug("actor_error %r %r", actor, error)
        # tell the ASE the actor had an error
        if self.serviceElement:
            self.sap_request(actor_error=actor, error=error)
    def get_actor(self, address):
        """ Get the actor associated with an address or None. """
        return self.clients.get(address, None)
    def connect(self, address, reconnect=0):
        """Open a connection to *address*; optionally auto-reconnect after
        *reconnect* seconds whenever it drops."""
        if _debug: TCPClientDirector._debug("connect %r reconnect=%r", address, reconnect)
        if address in self.clients:
            return
        # create an actor, which will eventually call add_actor
        client = self.actorClass(self, address)
        if _debug: TCPClientDirector._debug("    - client: %r", client)
        # if it should automatically reconnect, save the timer value
        if reconnect:
            self.reconnect[address] = reconnect
    def disconnect(self, address):
        """Close the connection to *address* and cancel any auto-reconnect."""
        if _debug: TCPClientDirector._debug("disconnect %r", address)
        if address not in self.clients:
            return
        # if it would normally reconnect, don't bother
        if address in self.reconnect:
            del self.reconnect[address]
        # close it
        self.clients[address].handle_close()
    def indication(self, pdu):
        """Direct this PDU to the appropriate server, create a
        connection if one hasn't already been created."""
        if _debug: TCPClientDirector._debug("indication %r", pdu)
        # get the destination
        addr = pdu.pduDestination
        # get the client
        client = self.clients.get(addr, None)
        if not client:
            # on-demand connection; the actor registers itself via add_actor
            client = self.actorClass(self, addr)
        # send the message
        client.indication(pdu)
#
# TCPServer
#
@bacpypes_debugging
class TCPServer(asyncore.dispatcher):
    """Server side of an accepted TCP connection.

    Wraps an already-connected socket: downstream PDUs (indication) are
    queued in ``self.request`` and drained by the asyncore loop; received
    bytes are sent upstream via ``response``.
    """
    def __init__(self, sock, peer):
        if _debug: TCPServer._debug("__init__ %r %r", sock, peer)
        asyncore.dispatcher.__init__(self, sock)
        # save the peer
        self.peer = peer
        # create a request buffer
        self.request = b''
    def handle_connect(self):
        if _debug: TCPServer._debug("handle_connect")
    def readable(self):
        return self.connected
    def handle_read(self):
        """Socket is readable: receive a chunk and push it upstream."""
        if _debug: TCPServer._debug("handle_read")
        try:
            msg = self.recv(65536)
            if _debug: TCPServer._debug("    - received %d octets", len(msg))
            # no socket means it was closed
            if not self.socket:
                if _debug: TCPServer._debug("    - socket was closed")
            else:
                # send the data upstream (deferred so it runs outside the poll)
                deferred(self.response, PDU(msg))
        except socket.error as err:
            if (err.args[0] == errno.ECONNREFUSED):
                if _debug: TCPServer._debug("    - connection to %r refused", self.peer)
            else:
                if _debug: TCPServer._debug("    - recv socket error: %r", err)
            # pass along to a handler
            self.handle_error(err)
    def writable(self):
        return (len(self.request) != 0)
    def handle_write(self):
        """Socket is writable: drain as much of the request buffer as possible."""
        if _debug: TCPServer._debug("handle_write")
        try:
            sent = self.send(self.request)
            if _debug: TCPServer._debug("    - sent %d octets, %d remaining", sent, len(self.request) - sent)
            self.request = self.request[sent:]
        except socket.error as err:
            if (err.args[0] == errno.ECONNREFUSED):
                if _debug: TCPServer._debug("    - connection to %r refused", self.peer)
            else:
                if _debug: TCPServer._debug("    - send socket error: %s", err)
            # sent the exception upstream
            self.handle_error(err)
    def handle_close(self):
        """Close the socket, tolerating repeated calls."""
        if _debug: TCPServer._debug("handle_close")
        if not self:
            if _debug: TCPServer._debug("    - self is None")
            return
        if not self.socket:
            if _debug: TCPServer._debug("    - socket already closed")
            return
        self.close()
        # mark closed for the other handlers
        self.socket = None
    def handle_error(self, error=None):
        """Trap for TCPServer errors, otherwise continue."""
        if _debug: TCPServer._debug("handle_error %r", error)
        # core does not take parameters
        asyncore.dispatcher.handle_error(self)
    def indication(self, pdu):
        """Requests are queued for delivery."""
        if _debug: TCPServer._debug("indication %r", pdu)
        self.request += pdu.pduData
#
# TCPServerActor
#
@bacpypes_debugging
class TCPServerActor(TCPServer):
    """Helper object for a TCPServerDirector: one actor per accepted
    connection.  Layers an idle timeout, graceful flushing, and director
    callbacks on top of TCPServer.
    """
    def __init__(self, director, sock, peer):
        if _debug: TCPServerActor._debug("__init__ %r %r %r", director, sock, peer)
        TCPServer.__init__(self, sock, peer)
        # keep track of the director
        self.director = director
        # add a timer to close a connection with no traffic
        self._idle_timeout = director.idle_timeout
        if self._idle_timeout:
            self.idle_timeout_task = FunctionTask(self.idle_timeout)
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        else:
            self.idle_timeout_task = None
        # this may have a flush state
        self.flush_task = None
        # tell the director this is a new actor
        self.director.add_actor(self)
    def handle_error(self, error=None):
        """Trap for TCPServer errors, otherwise continue."""
        if _debug: TCPServerActor._debug("handle_error %r", error)
        # pass along to the director
        if error is not None:
            self.director.actor_error(self, error)
        else:
            TCPServer.handle_error(self)
    def handle_close(self):
        """Cancel outstanding tasks, detach from the director, then close."""
        if _debug: TCPServerActor._debug("handle_close")
        # if there's a flush task, cancel it
        if self.flush_task:
            self.flush_task.suspend_task()
        # if there is an idle timeout, cancel it
        if self.idle_timeout_task:
            if _debug: TCPServerActor._debug("    - canceling idle timeout")
            self.idle_timeout_task.suspend_task()
            self.idle_timeout_task = None
        # tell the director this is gone
        self.director.del_actor(self)
        # pass it down
        TCPServer.handle_close(self)
    def idle_timeout(self):
        """No traffic for the idle period - shut it down."""
        if _debug: TCPServerActor._debug("idle_timeout")
        # shut it down
        self.handle_close()
    def indication(self, pdu):
        """Downstream data: queue it and reset the idle timer."""
        if _debug: TCPServerActor._debug("indication %r", pdu)
        # additional downstream data is tossed while flushing
        if self.flush_task:
            if _debug: TCPServerActor._debug("    - flushing")
            return
        # reschedule the timer
        if self.idle_timeout_task:
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        # continue as usual
        TCPServer.indication(self, pdu)
    def response(self, pdu):
        """Upstream data: tag the peer as source, reset the timer, forward."""
        if _debug: TCPServerActor._debug("response %r", pdu)
        # upstream data is tossed while flushing
        if self.flush_task:
            if _debug: TCPServerActor._debug("    - flushing")
            return
        # save the source
        pdu.pduSource = self.peer
        # reschedule the timer
        if self.idle_timeout_task:
            self.idle_timeout_task.install_task(_time() + self._idle_timeout)
        # process this as a response from the director
        self.director.response(pdu)
    def flush(self):
        """Keep rescheduling until the outgoing buffer drains, then close."""
        if _debug: TCPServerActor._debug("flush")
        # clear out the old task
        self.flush_task = None
        # if the outgoing buffer has data, re-schedule another attempt
        if self.request:
            self.flush_task = OneShotFunction(self.flush)
            return
        # close up shop, all done
        self.handle_close()
#
# TCPPickleServerActor
#
class TCPPickleServerActor(PickleActorMixIn, TCPServerActor):
    """Server actor that (un)pickles PDU payloads via PickleActorMixIn."""
    pass
#
# TCPServerDirector
#
@bacpypes_debugging
class TCPServerDirector(asyncore.dispatcher, Server, ServiceAccessPoint, DebugContents):
    """Listens on an address and manages one server actor per accepted
    connection.  Retries the bind for a while if the port is still in use,
    and routes downstream PDUs to the actor for their destination.
    """
    _debug_contents = ('port', 'idle_timeout', 'actorClass', 'servers')
    def __init__(self, address, listeners=5, idle_timeout=0, reuse=False, actorClass=TCPServerActor, cid=None, sapID=None):
        if _debug:
            TCPServerDirector._debug("__init__ %r listeners=%r idle_timeout=%r reuse=%r actorClass=%r cid=%r sapID=%r"
                , address, listeners, idle_timeout, reuse, actorClass, cid, sapID
                )
        Server.__init__(self, cid)
        ServiceAccessPoint.__init__(self, sapID)
        # save the address and timeout
        self.port = address
        self.idle_timeout = idle_timeout
        # check the actor class
        if not issubclass(actorClass, TCPServerActor):
            raise TypeError("actorClass must be a subclass of TCPServerActor")
        self.actorClass = actorClass
        # start with an empty pool of servers
        self.servers = {}
        # continue with initialization
        asyncore.dispatcher.__init__(self)
        # create a listening port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        if reuse:
            self.set_reuse_addr()
        # try to bind, keep trying for a while if its already in use
        hadBindErrors = False
        for i in range(30):
            try:
                self.bind(address)
                break
            except socket.error as err:
                hadBindErrors = True
                TCPServerDirector._warning('bind error %r, sleep and try again', err)
                _sleep(REBIND_SLEEP_INTERVAL)
        else:
            # loop exhausted without a successful bind
            TCPServerDirector._error('unable to bind')
            raise RuntimeError("unable to bind")
        # if there were some bind errors, generate a message that all is OK now
        if hadBindErrors:
            TCPServerDirector._info('bind successful')
        self.listen(listeners)
    def handle_accept(self):
        """Accept a connection and wrap it in a new server actor."""
        if _debug: TCPServerDirector._debug("handle_accept")
        try:
            client, addr = self.accept()
        except socket.error:
            TCPServerDirector._warning('accept() threw an exception')
            return
        except TypeError:
            # asyncore returns None on EWOULDBLOCK, which fails to unpack
            TCPServerDirector._warning('accept() threw EWOULDBLOCK')
            return
        if _debug: TCPServerDirector._debug("    - connection %r, %r", client, addr)
        # create a server
        server = self.actorClass(self, client, addr)
        # add it to our pool
        self.servers[addr] = server
        # return it to the dispatcher
        return server
    def handle_close(self):
        """Stop listening."""
        if _debug: TCPServerDirector._debug("handle_close")
        # close the socket
        self.close()
    def add_actor(self, actor):
        """Register a newly-connected actor and notify the service element."""
        if _debug: TCPServerDirector._debug("add_actor %r", actor)
        self.servers[actor.peer] = actor
        # tell the ASE there is a new server
        if self.serviceElement:
            self.sap_request(add_actor=actor)
    def del_actor(self, actor):
        """Unregister a closed actor and notify the service element."""
        if _debug: TCPServerDirector._debug("del_actor %r", actor)
        try:
            del self.servers[actor.peer]
        except KeyError:
            TCPServerDirector._warning("del_actor: %r not an actor", actor)
        # tell the ASE the server has gone away
        if self.serviceElement:
            self.sap_request(del_actor=actor)
    def actor_error(self, actor, error):
        """Forward an actor's error to the service element, if any."""
        if _debug: TCPServerDirector._debug("actor_error %r %r", actor, error)
        # tell the ASE the actor had an error
        if self.serviceElement:
            self.sap_request(actor_error=actor, error=error)
    def get_actor(self, address):
        """ Get the actor associated with an address or None. """
        return self.servers.get(address, None)
    def indication(self, pdu):
        """Direct this PDU to the appropriate server."""
        if _debug: TCPServerDirector._debug("indication %r", pdu)
        # get the destination
        addr = pdu.pduDestination
        # get the server
        server = self.servers.get(addr, None)
        if not server:
            raise RuntimeError("not a connected server")
        # pass the indication to the actor
        server.indication(pdu)
#
# StreamToPacket
#
@bacpypes_debugging
class StreamToPacket(Client, Server):
    """Re-frames a TCP byte stream into packets inside a protocol stack.

    The packetizer *fn* is called with a buffer and returns a
    ``(packet, leftover)`` pair, or ``None`` when no complete packet is
    available yet.  Partial data is buffered per peer address, separately
    for the upstream and downstream directions.
    """
    def __init__(self, fn, cid=None, sid=None):
        if _debug: StreamToPacket._debug("__init__ %r cid=%r, sid=%r", fn, cid, sid)
        Client.__init__(self, cid)
        Server.__init__(self, sid)
        # save the packet function
        self.packetFn = fn
        # start with an empty set of buffers
        self.upstreamBuffer = {}
        self.downstreamBuffer = {}
    def packetize(self, pdu, streamBuffer):
        """Yield complete packets carved out of *pdu*'s data, keeping any
        incomplete tail in *streamBuffer* keyed by peer address."""
        if _debug: StreamToPacket._debug("packetize %r ...", pdu)
        def chop(addr):
            if _debug: StreamToPacket._debug("chop %r", addr)
            # get the current buffer for this address plus the new data
            buff = streamBuffer.get(addr, b'') + pdu.pduData
            if _debug: StreamToPacket._debug("    - buff: %r", buff)
            # look for a packet
            while 1:
                packet = self.packetFn(buff)
                if _debug: StreamToPacket._debug("    - packet: %r", packet)
                if packet is None:
                    break
                yield PDU(packet[0],
                    source=pdu.pduSource,
                    destination=pdu.pduDestination,
                    user_data=pdu.pduUserData,
                    )
                buff = packet[1]
            # save what didn't get sent
            streamBuffer[addr] = buff
        # buffer related to the addresses
        # bug fix: the loops below used to reuse the name `pdu`, rebinding
        # the variable that the lazy `chop` generator reads after each
        # yield; a distinct loop name keeps the original PDU intact
        if pdu.pduSource:
            for packet_pdu in chop(pdu.pduSource):
                yield packet_pdu
        if pdu.pduDestination:
            for packet_pdu in chop(pdu.pduDestination):
                yield packet_pdu
    def indication(self, pdu):
        """Message going downstream."""
        if _debug: StreamToPacket._debug("indication %r", pdu)
        # hack it up into chunks
        for packet in self.packetize(pdu, self.downstreamBuffer):
            self.request(packet)
    def confirmation(self, pdu):
        """Message going upstream."""
        if _debug: StreamToPacket._debug("StreamToPacket.confirmation %r", pdu)
        # hack it up into chunks
        for packet in self.packetize(pdu, self.upstreamBuffer):
            self.response(packet)
#
# StreamToPacketSAP
#
@bacpypes_debugging
class StreamToPacketSAP(ApplicationServiceElement, ServiceAccessPoint):
    """Manage the per-peer stream buffers of a StreamToPacket instance."""

    def __init__(self, stp, aseID=None, sapID=None):
        if _debug:
            StreamToPacketSAP._debug("__init__ %r aseID=%r, sapID=%r", stp, aseID, sapID)
        ApplicationServiceElement.__init__(self, aseID)
        ServiceAccessPoint.__init__(self, sapID)

        # the StreamToPacket object whose buffers are kept in sync
        self.stp = stp

    def indication(self, add_actor=None, del_actor=None, actor_error=None, error=None):
        if _debug:
            StreamToPacketSAP._debug("indication add_actor=%r del_actor=%r", add_actor, del_actor)

        if add_actor:
            # new peer: start it off with empty stream buffers
            self.stp.upstreamBuffer[add_actor.peer] = b''
            self.stp.downstreamBuffer[add_actor.peer] = b''

        if del_actor:
            # peer went away: discard whatever was buffered for it
            del self.stp.upstreamBuffer[del_actor.peer]
            del self.stp.downstreamBuffer[del_actor.peer]

        # chain the event along to the next service element
        if self.serviceElement:
            self.sap_request(
                add_actor=add_actor,
                del_actor=del_actor,
                actor_error=actor_error, error=error,
            )
|
JoelBender/bacpypes
|
py27/bacpypes/tcp.py
|
Python
|
mit
| 31,609
|
[
"ASE"
] |
289dbe09537e5566df151ab8c1e586a3052a8b2cf4dbf4043563011536d5c947
|
"""Topology handling in gromacs"""
from ..db import ChemlabDB
from ..core import System, Molecule, Atom
from ..table import atomic_no, atomic_weight
from .energy import lorentz_berthelot as combine_lorentz_berthelot
import itertools
import time
import datetime
from itertools import combinations, combinations_with_replacement
from collections import OrderedDict
import numpy as np
def line(*args, **kwargs):
    """Format *args* as 10-character columns joined by spaces, newline ended.

    The ``just`` keyword selects ``"left"`` (default) or ``"right"``
    justification; any other value raises ValueError.
    """
    just = kwargs.get("just", "left")
    try:
        pad = {"left": str.ljust, "right": str.rjust}[just]
    except KeyError:
        raise ValueError('just must be right or left')
    return ' '.join(pad(str(a), 10) for a in args) + '\n'
def comment(*args):
    """Return *args* formatted by :func:`line`, prefixed as a ';' comment."""
    return ';{}'.format(line(*args))
class ChargedLJ(object):
    """A Lennard-Jones particle carrying a point charge *q*."""

    def __init__(self, name, q, type, sigma, eps):
        # identity
        self.name = name
        self.type = type
        # charge and LJ parameters
        self.q = q
        self.sigma = sigma
        self.eps = eps

    @property
    def c6(self):
        """Dispersion coefficient: 4 * eps * sigma^6."""
        sigma6 = self.sigma ** 6
        return 4.0 * self.eps * sigma6

    @property
    def c12(self):
        """Repulsion coefficient: 4 * eps * sigma^12."""
        sigma12 = self.sigma ** 12
        return 4.0 * self.eps * sigma12
class InterMolecular(object):
    """Collection of nonbonded particle types and their pair interactions."""

    def __init__(self, type='lj'):
        self.particles = {}      # name -> particle definition
        self.special_pairs = {}  # (name, name) -> parameter overrides
        self.type = type

    @classmethod
    def from_dict(cls, data):
        """Build from {name: {'q': ..., 'type': ..., 'sigma': ..., 'eps': ...}}."""
        self = cls()
        for name, atomspec in data.items():
            self.particles[name] = ChargedLJ(
                name,
                atomspec['q'],
                atomspec['type'],
                atomspec['sigma'],
                atomspec['eps'],
            )
        return self

    def pair_interaction(self, a, b):
        """Return the pair interaction object for particle types *a* and *b*."""
        i, j = self.particles[a], self.particles[b]
        # NOTE(review): only the (a, b) ordering is consulted; a pair stored
        # as (b, a) is silently ignored -- confirm this is intended
        params = self.special_pairs.get((a, b), {})

        if self.type == 'lj':
            sigma, eps = combine_lorentz_berthelot(
                i.sigma, j.sigma, i.eps, j.eps)
            return PairInteraction((i, j), sigma, eps)
        elif self.type == 'custom':
            # custom interactions are split into three radial terms
            return CustomPairInteraction((i, j),
                                         params['coulomb'],
                                         params['dispersion'],
                                         params['repulsion'])
        else:
            raise ValueError("Type not recognized")
class PairInteraction:
    """Lennard-Jones interaction between two particles.

    Fix: the original constructor accepted ``sigma`` and ``eps`` arguments
    but silently ignored them, always recombining from the two particles.
    Explicitly supplied values are now honored; when either is omitted, the
    Lorentz-Berthelot combination of the pair is used, which is exactly
    what the in-module caller (``InterMolecular.pair_interaction``) passes,
    so behavior for existing callers is unchanged.
    """

    def __init__(self, pair, sigma=None, eps=None):
        self.pair = pair
        if sigma is None or eps is None:
            sigma, eps = combine_lorentz_berthelot(
                pair[0].sigma, pair[1].sigma, pair[0].eps, pair[1].eps)
        self.sigma = sigma
        self.eps = eps

    @property
    def c6(self):
        """Dispersion coefficient: 4 * eps * sigma^6."""
        return 4.0 * self.eps * self.sigma ** 6

    @property
    def c12(self):
        """Repulsion coefficient: 4 * eps * sigma^12."""
        return 4.0 * self.eps * self.sigma ** 12

    def f(self, x):
        """Coulomb radial term (1/r); charges are applied elsewhere."""
        return 1.0 / x

    def g(self, x):
        """Attractive dispersion term: -C6 / r^6."""
        return -self.c6 * (1 / x ** 6)

    def h(self, x):
        """Repulsive term: C12 / r^12."""
        return self.c12 * (1 / x ** 12)
class CustomParticle:
    """A charged particle with an opaque parameter set for custom potentials."""

    def __init__(self, name, type, q, params):
        self.name = name
        self.type = type
        self.q = q
        self.params = params
from scipy.misc import derivative
class CustomPairInteraction:
    """Pair interaction defined by user-supplied coulomb/dispersion/repulsion
    functions.

    Each function takes ``(x, params_i, params_j)``, where ``x`` may be an
    array of distances, and returns the corresponding potential values.
    ``df``/``dg``/``dh`` are numerical derivatives of the three terms.
    """

    def __init__(self, pair, coulomb, dispersion, repulsion):
        self.pair = pair
        self.coulomb = coulomb
        self.dispersion = dispersion
        self.repulsion = repulsion

    def _params(self):
        # parameter sets of the two interaction partners
        return self.pair[0].params, self.pair[1].params

    def f(self, x):
        pi, pj = self._params()
        return self.coulomb(x, pi, pj)

    def df(self, x):
        return derivative(self.f, x, dx=1e-10)

    def g(self, x):
        pi, pj = self._params()
        return self.dispersion(x, pi, pj)

    def dg(self, x):
        return derivative(self.g, x, dx=1e-10)

    def h(self, x):
        pi, pj = self._params()
        return self.repulsion(x, pi, pj)

    def dh(self, x):
        return derivative(self.h, x, dx=1e-10)
class MolecularConstraints:
    """Bonded (intramolecular) terms describing one molecule type."""

    def __init__(self, name, atoms, bonds, angles, dihedrals):
        self.name = name
        self.atoms = atoms
        self.bonds = bonds
        self.angles = angles
        self.dihedrals = dihedrals
class HarmonicConstraint:
    """A harmonic bond term: atom-index pair *between*, equilibrium length
    *r* and force constant *k*."""

    def __init__(self, between, r, k):
        self.between = between
        self.r = r
        self.k = k
class HarmonicAngleConstraint:
    """A harmonic angle term: atom-index triple *between*, equilibrium angle
    *theta* and force constant *k*."""

    def __init__(self, between, theta, k):
        self.between = between
        self.theta = theta
        self.k = k
class IntraMolecular(object):
    """Collection of per-molecule bonded constraint definitions."""

    def __init__(self):
        self.molecules = {}  # molecule name -> MolecularConstraints

    @classmethod
    def from_dict(cls, data):
        """Build from {name: {'atoms': [...], 'bonds': [...], 'angles': [...]}}.

        'bonds' and 'angles' are optional; dihedrals are not read and are
        always initialized empty.
        """
        self = cls()
        for name, molspec in data.items():
            bonds = [HarmonicConstraint(b['between'], b['r'], b['k'])
                     for b in molspec.get('bonds', [])]
            angles = [HarmonicAngleConstraint(a['between'], a['theta'], a['k'])
                      for a in molspec.get('angles', [])]
            self.molecules[name] = MolecularConstraints(
                name, molspec['atoms'], bonds, angles, [])
        return self
class Potential(object):
    """Pair a nonbonded (intermolecular) model with a bonded (intramolecular)
    one."""

    def __init__(self, nonbonded, bonded):
        self.intermolecular = nonbonded
        self.intramolecular = bonded
class ForceGenerator(object):
    """Build inter- and intra-molecular potentials from a spec dictionary."""

    def __init__(self, spec):
        # the spec must carry 'nonbonded' and 'bonded' sub-dictionaries
        self.intermolecular = InterMolecular.from_dict(spec['nonbonded'])
        self.intramolecular = IntraMolecular.from_dict(spec['bonded'])
def to_table(custom_interaction, cutoff, precision='double'):
    """Tabulate a custom pair interaction as Gromacs table rows.

    Emits lines of ``r f -f' g -g' h -h'`` for r from 0 to ``cutoff + 1``
    (plus two guard points), with spacing 0.002 for 'single' precision and
    0.0005 for 'double'.

    Bug fix: the original used two independent ``if`` statements, so asking
    for 'single' precision set ``step`` and then immediately fell into the
    ``else`` of the 'double' test, raising ValueError.
    """
    if precision == 'single':
        step = 0.002
    elif precision == 'double':
        step = 0.0005
    else:
        raise ValueError("Precision can be either single or double")

    r = np.arange(0.0, 1 + cutoff + 2 * step, step)
    f = custom_interaction.f(r)
    df = custom_interaction.df(r)
    g = custom_interaction.g(r)
    dg = custom_interaction.dg(r)
    h = custom_interaction.h(r)
    dh = custom_interaction.dh(r)

    columns = [r, f, -df, g, -dg, h, -dh]
    rows = np.array(columns).T
    # zero the whole r = 0 row; presumably avoids singular 1/r-style terms
    # at the origin -- TODO confirm against the table consumer
    rows[0] = 0.0
    return '\n'.join(' '.join("%.8E" % n for n in row) for row in rows)
def to_top(system, potential):
    """Serialize *system* plus *potential* into a Gromacs .top topology string.

    system -- a chemlab System; iterated molecule-by-molecule via subentity()
    potential -- a Potential holding intermolecular particle/pair terms and
        intramolecular constraint terms
    Returns the complete topology file contents as one string.
    """
    # one Molecule subentity per molecule in the system
    molecules = [system.subentity(Molecule, i)
                 for i in range(system.dimensions['molecule'])]
    # first-seen unique molecule types, preserving order of appearance
    unique_molecules = OrderedDict()
    [unique_molecules.__setitem__(m.molecule_name, m) for m in molecules]
    # unique atom names across those molecule types
    unique_atoms = OrderedDict()
    for m in unique_molecules.values():
        for a in [m.subentity(Atom, i) for i in range(m.dimensions['atom'])]:
            unique_atoms[a.atom_name] = a
    # Defaults section
    r = ''
    r += comment('Generated by chemlab ' +
                 datetime.datetime
                 .fromtimestamp(time.time())
                 .strftime('%Y-%m-%d %H:%M:%S'))
    r += line('[ defaults ]')
    r += comment('nbfunc', 'comb-rule', 'gen-pairs', 'fudgeL', 'fudgeQQ')
    # nbfunc=1, comb-rule=1; pair parameters are overridden explicitly below
    r += line(1, 1, "yes", 0.5, 0.5)
    r += line()
    # Non bonded interactions
    r += line('[ atomtypes ]')
    # NOTE(review): this header lists 7 column names but 8 values are
    # written per row below -- confirm against the intended parser
    r += comment('name', 'atom_type', 'mass', 'charge', 'ptype', 'C', 'A')
    name_to_type = {}
    for atom in unique_atoms:
        # potential.intermolecular.particles
        particle = potential.intermolecular.particles[atom]
        if isinstance(particle, ChargedLJ):
            r += line(particle.name, particle.type, atomic_no(particle.type), atomic_weight(particle.type),
                      particle.q, 'A', particle.c6, particle.c12)
        elif isinstance(particle, CustomParticle):
            # tabulated potentials: C6/C12 are placeholders (tables carry the shape)
            r += line(particle.name, particle.type, atomic_no(particle.type), atomic_weight(particle.type),
                      particle.q, 'A', 1.0, 1.0)
        else:
            raise ValueError("unknown particle type {}".format(particle))
        name_to_type[particle.name] = particle.type
    r += line()
    r += line('[ nonbondparams ]')
    r += comment('i', 'j', 'func', 'V', 'W')
    # We override gromacs with our own rules
    for atom1, atom2 in combinations_with_replacement(unique_atoms, 2):
        # potential.intermolecular.pairs:
        pair = potential.intermolecular.pair_interaction(atom1, atom2)
        if isinstance(pair, PairInteraction):
            r += line(pair.pair[0].name,
                      pair.pair[1].name,
                      1,  # Combination rule 1 = lorentz-berthelot
                      pair.c6,
                      pair.c12)
        elif isinstance(pair, CustomPairInteraction):
            # placeholder parameters; the real shape comes from the table file
            r += line(pair.pair[0].name,
                      pair.pair[1].name, 1, 1.0, 1.0)
        else:
            raise ValueError("Wrong pair interaction {}".format(pair))
    r += line()
    # One [ moleculetype ] section per unique molecule
    for molecule_name in unique_molecules:
        # print potential.intramolecular.molecules
        molecule = potential.intramolecular.molecules[molecule_name]
        r += line('[ moleculetype ]')
        r += comment('name', 'nbexcl')
        r += line(molecule.name, 2)
        r += line()
        # Atoms directive...
        r += line('[ atoms ]', just="left")
        r += comment('nr', 'type', 'resnr', 'residue',
                     'atom', 'cgnr', 'charge', 'mass')
        for i, t in enumerate(molecule.atoms):
            p = potential.intermolecular.particles[t]
            # atom indices are 1-based in the .top format
            r += line(i + 1, t, 1, molecule.name, t, 1, p.q)
            # 1 O 1 SOL OW 1 -0.8476
        r += line()
        # Bonds directive...
        if molecule.bonds:
            r += line('[ bonds ]', just="left")
            r += comment('i', 'j', 'funct', 'length', 'force.c.')
            for b in molecule.bonds:
                r += line(b.between[0] + 1, b.between[1] + 1, 1, b.r, b.k)
            r += line()
        # Angle directive...
        if molecule.angles:
            r += line('[ angles ]', just="left")
            r += comment('i', 'j', 'k', 'funct', 'angle', 'force.c.')
            for ang in molecule.angles:
                r += line(ang.between[0] + 1,
                          ang.between[1] + 1,
                          ang.between[2] + 1, 1, ang.theta, ang.k)
            r += line()
        # Create dihedrals
        # NOTE(review): currently dead -- IntraMolecular.from_dict always
        # passes an empty dihedral list; also no [ dihedrals ] header is
        # written and .theta/.k are reused from the angle layout -- confirm
        for ang in molecule.dihedrals:
            r += line(ang.between[0] + 1,
                      ang.between[1] + 1,
                      ang.between[2] + 1, 1, ang.theta, ang.k)
        r += line()
    # System
    r += line('[ system ]')
    r += line('flying pandas')
    r += line()
    r += line('[ molecules ]')
    # run-length encode the per-molecule name sequence into (name, count);
    # the first appended pair is a (-1, 0) placeholder, popped afterwards
    counter = 0
    current = -1
    mollist = []
    for t in system.molecule_name:
        if t != current:
            mollist.append((current, counter))
            current = t
            counter = 0
        counter += 1
    mollist.append((current, counter))
    mollist.pop(0)
    for mol, counter in mollist:
        r += line(mol, counter)
    return r
def from_top(topfile):
    """Parse a Gromacs .top file (placeholder -- parsing not implemented).

    Currently only consumes the file object.  A complete implementation
    would recover atom types, system-wide pair interactions and per-molecule
    bond/angle terms; molecule counts are system-level information and, as a
    limitation of the format, are deliberately not read.
    """
    topfile.read()
|
chemlab/chemlab
|
chemlab/md/potential.py
|
Python
|
gpl-3.0
| 11,381
|
[
"Gromacs"
] |
43091bdd31c17b7c2f271be0d327da3ba0521185cce75dcf764b08a3d82f92e1
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 16 02:08:24 2014
@author: hangyin
"""
# a python implementation of Cross-Entropy method for stochastic optimization
import copy
import numpy as np
class GaussianCEM:
    """Cross-Entropy method optimizer with a Gaussian sampling distribution.

    Maintains a mean/covariance pair; :meth:`sample` draws candidate
    solutions and :meth:`fit` re-weights them by cost to update the
    distribution (PI-BB weighting and covariance update by default).

    Fixes relative to the original:
      * Python 2 ``print`` statements converted to ``print()`` calls.
      * ``np.log(range(mu) + 1)`` (a TypeError) replaced by
        ``np.log(np.arange(mu) + 1.)`` for the CMA-ES weighting.
      * ``1 / mu`` made an explicit float division for CEM weighting.
      * An unknown weighting method now raises ValueError instead of
        printing and returning a bare ``None`` that callers failed to unpack.
      * Mutable default arguments (``x0=[0]``, ``covar_bounds=[0.1]``)
        replaced by ``None`` sentinels with the same effective defaults.
    """

    def __init__(self, x0=None, eliteness=10, covar_full=False,
                 covar_learning_rate=1, covar_scale=None, covar_bounds=None):
        # sentinel defaults keep the original effective values while
        # avoiding shared mutable default arguments
        if x0 is None:
            x0 = [0]
        if covar_bounds is None:
            covar_bounds = [0.1]
        self.options = {
            'dist_weighting_method': 'PI-BB',
            'dist_cov_update': 'PI-BB',
            'dist_eliteness': eliteness,
            'covar_full': covar_full,
            'covar_learning_rate': covar_learning_rate,
            'covar_scale': covar_scale,
            'covar_bounds': covar_bounds
        }
        self.mean = np.array(x0)
        self.covar = np.eye(len(self.mean))

    def fit(self, X, y):
        """One distribution-update step from samples X and their costs y."""
        dist_parms = {'mean': self.mean, 'covar': self.covar}
        res_dist_parms, _ = self.default_dist_fitting(
            dist_parms, X, y, self.options)
        self.mean = res_dist_parms['mean']
        self.covar = res_dist_parms['covar']

    def sample(self, n_samples=10):
        """Draw *n_samples* from the current Gaussian distribution."""
        return np.random.multivariate_normal(self.mean, self.covar, n_samples)

    @staticmethod
    def _cost_weights(costs, options):
        """Map evaluated costs to unnormalized sample weights."""
        # if the relative spread is negligible, weight all samples equally
        if np.std(costs) / np.mean(costs) < 1e-8:
            return np.ones(np.shape(costs))

        method = options['dist_weighting_method']
        if method == 'PI-BB':
            # exponentiated, min-max normalized costs
            h = options['dist_eliteness']
            return np.exp(-h * (costs - min(costs)) /
                          (max(costs) - min(costs) + 1e-6))
        if method in ('CEM', 'CMA-ES'):
            # rank-based: only the mu best samples get non-zero weight
            mu = options['dist_eliteness']
            order = np.argsort(costs)
            weights = np.zeros(np.shape(costs))
            if method == 'CEM':
                weights[order[:mu]] = 1.0 / mu  # fix: force float division
            else:
                # fix: `range(mu) + 1` raised TypeError; use np.arange
                weights[order[:mu]] = (np.log(mu + 1. / 2) -
                                       np.log(np.arange(mu) + 1.))
            return weights
        # fix: fail loudly instead of printing and returning a bare None
        raise ValueError(
            "Undefined distribution weighting method {0}.".format(method))

    @staticmethod
    def _updated_covariance(dist_parms, res_mean, samples, weights, options):
        """Compute the (unbounded) updated covariance matrix."""
        update = options['dist_cov_update']
        if update == 'decay':
            # anneal the old covariance, akin to simulated annealing
            return options['dist_cov_decay'] * dist_parms['covar']
        if update in ('PI-BB', 'CEM'):
            # weighted scatter around the old mean (PI-BB) or new mean (CEM)
            mu = res_mean if update == 'CEM' else dist_parms['mean']
            epsilon = samples - np.tile(mu, [samples.shape[0], 1])
            covar_new = np.dot(
                np.transpose([epsilon[i, :] * weights[i]
                              for i in range(len(weights))]),
                epsilon)
            if options['covar_full'] is False:
                # keep only the diagonal: heuristically more stable
                covar_new = np.diag(np.diag(covar_new))
            lr = options['covar_learning_rate']
            return (1 - lr) * dist_parms['covar'] + lr * covar_new
        # best-effort fallback, as in the original (now py3 print)
        print("CMA_ES covariance update has not been implemented yet")
        return dist_parms['covar']

    @staticmethod
    def _bounded_covariance(covar_new, mean, options):
        """Apply eigenvalue bounds to prevent premature convergence."""
        if options['covar_bounds'] is None:
            return covar_new

        # per-dimension scaling; default is to treat dimensions equally
        if options['covar_scale'] is None:
            covar_scale = np.ones(np.shape(mean))
        else:
            covar_scale = options['covar_scale']
        covar_scale = covar_scale[np.newaxis]
        scale_matrix = covar_scale.T.dot(covar_scale)
        covar_scaled = covar_new * scale_matrix

        # bounds: [relative_lower, absolute_lower?, absolute_upper?];
        # -1 means "no bound"
        bounds = options['covar_bounds']
        rel_lower_bound = bounds[0]
        abs_lower_bound = bounds[1] if len(bounds) > 1 else -1
        abs_upper_bound = bounds[2] if len(bounds) > 2 else -1

        # a diagonal covariance lets us skip the eigen decomposition
        if options['covar_full'] is False:
            eig_vec = np.eye(np.shape(covar_scaled)[0])
            eig_val = np.diag(covar_scaled).copy()
        else:
            eig_val, eig_vec = np.linalg.eig(covar_scaled)

        if abs_upper_bound != -1:
            eig_val[eig_val > abs_upper_bound] = abs_upper_bound
        if rel_lower_bound != 1:
            if rel_lower_bound < 0 or rel_lower_bound > 1:
                # best-effort warning, as in the original (now py3 print)
                print("Relative bound should be within [0, 1]")
            else:
                # lower bound derived from the largest eigenvalue
                abs_lower_bound = max(
                    [abs_lower_bound, rel_lower_bound * max(eig_val)])
        if abs_lower_bound != -1:
            eig_val[eig_val < abs_lower_bound] = abs_lower_bound

        # reconstruct and undo the scaling
        bounded = eig_vec.dot(np.diag(eig_val)).dot(eig_vec.T)
        return bounded / scale_matrix

    def default_dist_fitting(self, dist_parms, samples, costs, options, var_dim=None):
        """Fit a cost-weighted Gaussian to *samples*.

        dist_parms -- dict with 'mean' and 'covar' of the current distribution
        samples -- (n_samples, n_dims) array of queries
        costs -- cost value per sample (lower is better)
        options -- weighting/update/bounding options (see __init__)
        var_dim -- (lo, hi) slice of dimensions whose covariance is updated;
            defaults to all dimensions
        Returns (updated_parms_dict, updated_mean).
        """
        if var_dim is None:
            var_dim = (0, len(dist_parms['mean']))

        # map costs to normalized weights
        weights = self._cost_weights(costs, options)
        weights = weights / np.sum(weights)

        # weighted mean
        res_dist_parms = copy.deepcopy(dist_parms)
        res_dist_parms['mean'] = np.sum(
            [samples[i] * weights[i] for i in range(len(weights))], axis=0)

        # weighted covariance, then bounded to avoid premature convergence
        covar_new = self._updated_covariance(
            dist_parms, res_dist_parms['mean'], samples, weights, options)
        covar_new_bounded = self._bounded_covariance(
            covar_new, res_dist_parms['mean'], options)

        lo, hi = var_dim
        res_dist_parms['covar'][lo:hi, lo:hi] = covar_new_bounded[lo:hi, lo:hi]
        return res_dist_parms, res_dist_parms['mean']
def pygcem_test():
    """Smoke test: CEM search for a stationary point of a quadratic cost.

    Fix: Python 2 ``print`` statements converted to ``print()`` calls so the
    module runs under Python 3.
    """
    n_iters = 100
    # task: drive the distribution mean from x_init towards x_dest
    x_dest = np.array([0, 0])
    x_init = np.array([5, 5])
    gcem = GaussianCEM(x0=x_init)
    curr_mean = gcem.mean

    def test_cost(x):
        # Euclidean distance of each sample to the goal
        return np.linalg.norm(x - x_dest, axis=1)

    for i in range(n_iters):
        # roll out a batch from the current distribution and re-fit
        X = gcem.sample(n_samples=10)
        y = test_cost(X)
        curr_avg_cost = np.mean(y)
        print('Mean: ', curr_mean, '; Avg cost: ', curr_avg_cost)
        gcem.fit(X, y)
        # stop when the mean no longer moves
        diff = curr_mean - gcem.mean
        if np.linalg.norm(diff) < 1e-6:
            break
        curr_mean = gcem.mean
# Run the convergence smoke test when executed as a script.
if __name__ == '__main__':
    pygcem_test()
|
navigator8972/vae_assoc
|
pygcem.py
|
Python
|
bsd-2-clause
| 8,422
|
[
"Gaussian"
] |
3832929afaf012580a7fd623a87adc822ec61b0acad3c48c85e16a172c4aa14c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.