| text (stringlengths 12..1.05M) | repo_name (stringlengths 5..86) | path (stringlengths 4..191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32 12..1.05M) | keyword (listlengths 1..23) | text_hash (stringlengths 64..64) |
|---|---|---|---|---|---|---|---|
from com.im.lac.types import MoleculeObject, MoleculeObjectIterable
from java import lang
import sys
# Place the Python module on the path
#sys.path.append('/lac/components/rdkit-camel/src/main/python')
from java.util import ArrayList
from find_props.find_props import calc_props
from java.lang import Class
lang.System.loadLibrary('GraphMolWrap')
# Pull it in as a stream of strings
from org.RDKit import *
my_funct = request.getHeader("function")
my_list = request.body
counter = 0
calc_props(request, my_funct)
for item in my_list:
counter+=1
request.body = counter
|
InformaticsMatters/squonk
|
components/rdkit-camel/src/main/python/calc_props.py
|
Python
|
apache-2.0
| 580
|
[
"RDKit"
] |
e2403e23b630abe3b1255a5f497c34e7c31b0db7411a05f933a323b88e138f50
|
#!/usr/bin/env python2
##################################################
# GNU Radio Python Flow Graph
# Title: FM Radio
# Author: Brian McLaughlin (bjmclaughlin@gmail.com)
# Generated: Sun Mar 6 13:27:58 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from PyQt4.QtCore import QObject, pyqtSlot
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import cmath
import osmosdr
import rds
import sip
import sys
import time
class fm_radio(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "FM Radio")
Qt.QWidget.__init__(self)
self.setWindowTitle("FM Radio")
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "fm_radio")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.valid_gains = valid_gains = [0.0, 0.9, 1.4, 2.7, 3.7, 7.7, 8.7, 12.5, 14.4, 15.7, 16.6, 19.7, 20.7, 22.9, 25.4, 28.0, 29.7, 32.8, 33.8, 36.4, 37.2, 38.6, 40.2, 42.1, 43.4, 43.9, 44.5, 48.0, 49.6]
self.samp_rate = samp_rate = 2.048e6
self.baseband_decimation = baseband_decimation = 10
self.rf_gain = rf_gain = len(valid_gains)-1
self.rds_dec = rds_dec = 10
self.pilot_tone = pilot_tone = 19e3
self.baseband_rate = baseband_rate = samp_rate // baseband_decimation
self.stereo_subcarrier = stereo_subcarrier = pilot_tone * 2
self.stereo_button = stereo_button = 0
self.slider_volume = slider_volume = 0
self.sdr_gain = sdr_gain = valid_gains[rf_gain]
self.rds_symbols_per_bit = rds_symbols_per_bit = 2
self.rds_subcarrier = rds_subcarrier = pilot_tone * 3
self.rds_samp_rate = rds_samp_rate = baseband_rate / rds_dec
self.rds_bitrate = rds_bitrate = 1.1875e3
self.rds_bandwidth = rds_bandwidth = 2.83e3
self.fm_station = fm_station = 102.7
self.fm_broadcast_seperation = fm_broadcast_seperation = 0.2
self.fm_broadcast_low = fm_broadcast_low = 87.1
self.fm_broadcast_high = fm_broadcast_high = 107.9
self.audio_rate = audio_rate = 48e3
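# Derived rates (these follow from the variables above, as a sanity check):
# baseband_rate = 2.048e6 // 10 = 204.8 kS/s and rds_samp_rate = 204.8e3 / 10
# = 20.48 kS/s, so the MPSK receiver below runs at roughly
# 20480 / (1187.5 * 2) ~= 8.6 samples per RDS symbol.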
##################################################
# Blocks
##################################################
self.notebook_top = Qt.QTabWidget()
self.notebook_top_widget_0 = Qt.QWidget()
self.notebook_top_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_0)
self.notebook_top_grid_layout_0 = Qt.QGridLayout()
self.notebook_top_layout_0.addLayout(self.notebook_top_grid_layout_0)
self.notebook_top.addTab(self.notebook_top_widget_0, "RF Receive")
self.notebook_top_widget_1 = Qt.QWidget()
self.notebook_top_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_1)
self.notebook_top_grid_layout_1 = Qt.QGridLayout()
self.notebook_top_layout_1.addLayout(self.notebook_top_grid_layout_1)
self.notebook_top.addTab(self.notebook_top_widget_1, "Baseband")
self.notebook_top_widget_2 = Qt.QWidget()
self.notebook_top_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_2)
self.notebook_top_grid_layout_2 = Qt.QGridLayout()
self.notebook_top_layout_2.addLayout(self.notebook_top_grid_layout_2)
self.notebook_top.addTab(self.notebook_top_widget_2, "Mono Audio")
self.notebook_top_widget_3 = Qt.QWidget()
self.notebook_top_layout_3 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_3)
self.notebook_top_grid_layout_3 = Qt.QGridLayout()
self.notebook_top_layout_3.addLayout(self.notebook_top_grid_layout_3)
self.notebook_top.addTab(self.notebook_top_widget_3, "Sub-Carrier Generation")
self.notebook_top_widget_4 = Qt.QWidget()
self.notebook_top_layout_4 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_4)
self.notebook_top_grid_layout_4 = Qt.QGridLayout()
self.notebook_top_layout_4.addLayout(self.notebook_top_grid_layout_4)
self.notebook_top.addTab(self.notebook_top_widget_4, "Stereo")
self.notebook_top_widget_5 = Qt.QWidget()
self.notebook_top_layout_5 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_top_widget_5)
self.notebook_top_grid_layout_5 = Qt.QGridLayout()
self.notebook_top_layout_5.addLayout(self.notebook_top_grid_layout_5)
self.notebook_top.addTab(self.notebook_top_widget_5, "RDS")
self.top_grid_layout.addWidget(self.notebook_top, 3, 0, 1, 8)
self._slider_volume_range = Range(0, 11.1, 0.1, 0, 100)
self._slider_volume_win = RangeWidget(self._slider_volume_range, self.set_slider_volume, 'Volume', "counter_slider", float)
self.top_grid_layout.addWidget(self._slider_volume_win, 1, 1, 1, 1)
self.notebook_subcarriers = Qt.QTabWidget()
self.notebook_subcarriers_widget_0 = Qt.QWidget()
self.notebook_subcarriers_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_subcarriers_widget_0)
self.notebook_subcarriers_grid_layout_0 = Qt.QGridLayout()
self.notebook_subcarriers_layout_0.addLayout(self.notebook_subcarriers_grid_layout_0)
self.notebook_subcarriers.addTab(self.notebook_subcarriers_widget_0, "Pilot Signal")
self.notebook_subcarriers_widget_1 = Qt.QWidget()
self.notebook_subcarriers_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_subcarriers_widget_1)
self.notebook_subcarriers_grid_layout_1 = Qt.QGridLayout()
self.notebook_subcarriers_layout_1.addLayout(self.notebook_subcarriers_grid_layout_1)
self.notebook_subcarriers.addTab(self.notebook_subcarriers_widget_1, "Spectrum")
self.notebook_top_grid_layout_3.addWidget(self.notebook_subcarriers, 0, 0, 1, 1)
self.notebook_rds = Qt.QTabWidget()
self.notebook_rds_widget_0 = Qt.QWidget()
self.notebook_rds_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_rds_widget_0)
self.notebook_rds_grid_layout_0 = Qt.QGridLayout()
self.notebook_rds_layout_0.addLayout(self.notebook_rds_grid_layout_0)
self.notebook_rds.addTab(self.notebook_rds_widget_0, "RDS Signal")
self.notebook_rds_widget_1 = Qt.QWidget()
self.notebook_rds_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.notebook_rds_widget_1)
self.notebook_rds_grid_layout_1 = Qt.QGridLayout()
self.notebook_rds_layout_1.addLayout(self.notebook_rds_grid_layout_1)
self.notebook_rds.addTab(self.notebook_rds_widget_1, "RDS Bitstream")
self.notebook_top_grid_layout_5.addWidget(self.notebook_rds, 0, 0, 1, 1)
self._fm_station_range = Range(fm_broadcast_low, fm_broadcast_high, fm_broadcast_seperation, 102.7, 200)
self._fm_station_win = RangeWidget(self._fm_station_range, self.set_fm_station, "FM Station", "counter_slider", float)
self.top_grid_layout.addWidget(self._fm_station_win, 0, 0, 1, 8)
self._stereo_button_options = (0, 1, )
self._stereo_button_labels = ("Mono", "Stereo", )
self._stereo_button_tool_bar = Qt.QToolBar(self)
self._stereo_button_tool_bar.addWidget(Qt.QLabel("Audio Output"+": "))
self._stereo_button_combo_box = Qt.QComboBox()
self._stereo_button_tool_bar.addWidget(self._stereo_button_combo_box)
for label in self._stereo_button_labels: self._stereo_button_combo_box.addItem(label)
self._stereo_button_callback = lambda i: Qt.QMetaObject.invokeMethod(self._stereo_button_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._stereo_button_options.index(i)))
self._stereo_button_callback(self.stereo_button)
self._stereo_button_combo_box.currentIndexChanged.connect(
lambda i: self.set_stereo_button(self._stereo_button_options[i]))
self.top_grid_layout.addWidget(self._stereo_button_tool_bar, 1, 2, 1, 1)
self.rtlsdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + "" )
self.rtlsdr_source_0.set_sample_rate(samp_rate)
self.rtlsdr_source_0.set_center_freq(fm_station * 1e6, 0)
self.rtlsdr_source_0.set_freq_corr(14, 0)
self.rtlsdr_source_0.set_dc_offset_mode(2, 0)
self.rtlsdr_source_0.set_iq_balance_mode(0, 0)
self.rtlsdr_source_0.set_gain_mode(False, 0)
self.rtlsdr_source_0.set_gain(sdr_gain, 0)
self.rtlsdr_source_0.set_if_gain(0, 0)
self.rtlsdr_source_0.set_bb_gain(0, 0)
self.rtlsdr_source_0.set_antenna("", 0)
self.rtlsdr_source_0.set_bandwidth(0, 0)
self.root_raised_cosine_filter_0 = filter.fir_filter_ccf(1, firdes.root_raised_cosine(
2, rds_samp_rate, rds_bitrate * rds_symbols_per_bit, 0.275, 16))
self._rf_gain_range = Range(0, len(valid_gains)-1, 1, len(valid_gains)-1, 200)
self._rf_gain_win = RangeWidget(self._rf_gain_range, self.set_rf_gain, "RF Gain", "counter_slider", int)
self.top_grid_layout.addWidget(self._rf_gain_win, 1, 0, 1, 1)
self.rds_qt_panel_0 = rds.qt_panel()
self.notebook_top_layout_5.addWidget(self.rds_qt_panel_0)
self.rational_resampler_xxx_0_0_0_1 = filter.rational_resampler_fff(
interpolation=int(audio_rate),
decimation=int(baseband_rate),
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0_0_0_0 = filter.rational_resampler_fff(
interpolation=int(audio_rate),
decimation=int(baseband_rate),
taps=None,
fractional_bw=None,
)
self.rational_resampler_xxx_0_0_0 = filter.rational_resampler_fff(
interpolation=int(audio_rate),
decimation=int(baseband_rate),
taps=None,
fractional_bw=None,
)
self.qtgui_time_sink_x_1 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"RBDS Bit Stream", #name
2 #number of inputs
)
self.qtgui_time_sink_x_1.set_update_time(0.10)
self.qtgui_time_sink_x_1.set_y_axis(-1.7, 1.7)
self.qtgui_time_sink_x_1.set_y_label("Amplitude", "")
self.qtgui_time_sink_x_1.enable_tags(-1, False)
self.qtgui_time_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_1.enable_autoscale(False)
self.qtgui_time_sink_x_1.enable_grid(True)
self.qtgui_time_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_1.disable_legend()
labels = ["Raw Bit Stream", "Differential Decoded", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_1_win = sip.wrapinstance(self.qtgui_time_sink_x_1.pyqwidget(), Qt.QWidget)
self.notebook_rds_layout_1.addWidget(self._qtgui_time_sink_x_1_win)
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
1024, #size
baseband_rate, #samp_rate
"19 KHz Pilot Signal", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1.5, 1.5)
self.qtgui_time_sink_x_0.set_y_label("Amplitude", "counts")
self.qtgui_time_sink_x_0.enable_tags(-1, False)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_time_sink_x_0.disable_legend()
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.notebook_subcarriers_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_win, 0, 1, 1, 1)
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
rds_samp_rate, #bw
"RDS Subcarrier Signal (DSB-SSC)", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.10)
self.qtgui_freq_sink_x_1.set_y_axis(-100, 0)
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_1.disable_legend()
if complex == type(float()):
self.qtgui_freq_sink_x_1.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.notebook_rds_grid_layout_0.addWidget(self._qtgui_freq_sink_x_1_win, 0, 0, 1, 1)
self.qtgui_freq_sink_x_0_1_0_1_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate, #bw
"Stereo Audio Left", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0_1_0_1_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_1_0_1_0.set_y_axis(-100, -30)
self.qtgui_freq_sink_x_0_1_0_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_1_0_1_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_1_0_1_0.enable_grid(False)
self.qtgui_freq_sink_x_0_1_0_1_0.set_fft_average(0.1)
self.qtgui_freq_sink_x_0_1_0_1_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_1_0_1_0.disable_legend()
if float == type(float()):
self.qtgui_freq_sink_x_0_1_0_1_0.set_plot_pos_half(not False)
labels = ["Stereo Left", "Stereo Right", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_1_0_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_1_0_1_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_1_0_1_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_1_0_1_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_1_0_1_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_1_0_1_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_1_0_1_0.pyqwidget(), Qt.QWidget)
self.notebook_top_grid_layout_4.addWidget(self._qtgui_freq_sink_x_0_1_0_1_0_win, 0, 0, 1, 1)
self.qtgui_freq_sink_x_0_1_0_1 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate, #bw
"Stereo Audio Right", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0_1_0_1.set_update_time(0.10)
self.qtgui_freq_sink_x_0_1_0_1.set_y_axis(-100, -30)
self.qtgui_freq_sink_x_0_1_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_1_0_1.enable_autoscale(False)
self.qtgui_freq_sink_x_0_1_0_1.enable_grid(False)
self.qtgui_freq_sink_x_0_1_0_1.set_fft_average(0.1)
self.qtgui_freq_sink_x_0_1_0_1.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_1_0_1.disable_legend()
if float == type(float()):
self.qtgui_freq_sink_x_0_1_0_1.set_plot_pos_half(not False)
labels = ["Stereo Right", "Stereo Right", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_1_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_1_0_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_1_0_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_1_0_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_1_0_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_1_0_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_1_0_1.pyqwidget(), Qt.QWidget)
self.notebook_top_grid_layout_4.addWidget(self._qtgui_freq_sink_x_0_1_0_1_win, 0, 1, 1, 1)
self.qtgui_freq_sink_x_0_1_0_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
baseband_rate, #bw
"Pilot & Stereo Carrier", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0_1_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_1_0_0.set_y_axis(-80, 0)
self.qtgui_freq_sink_x_0_1_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_1_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_1_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_1_0_0.set_fft_average(0.1)
self.qtgui_freq_sink_x_0_1_0_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_1_0_0.disable_legend()
if float == type(float()):
self.qtgui_freq_sink_x_0_1_0_0.set_plot_pos_half(not False)
labels = ["Pilot Tone", "Stereo Carrier", "RDS Carrier", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_1_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_1_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_1_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_1_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_1_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_1_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_1_0_0.pyqwidget(), Qt.QWidget)
self.notebook_subcarriers_grid_layout_1.addWidget(self._qtgui_freq_sink_x_0_1_0_0_win, 0, 0, 1, 1)
self.qtgui_freq_sink_x_0_0_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate, #bw
"Mono Audio (L+R)", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0_0.set_y_axis(-100, -30)
self.qtgui_freq_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0_0.set_fft_average(0.2)
self.qtgui_freq_sink_x_0_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0_0.disable_legend()
if float == type(float()):
self.qtgui_freq_sink_x_0_0_0.set_plot_pos_half(not False)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.notebook_top_grid_layout_2.addWidget(self._qtgui_freq_sink_x_0_0_0_win, 1, 0, 1, 5)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_f(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
baseband_rate, #bw
"FM Baseband", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0_0.set_y_axis(-100, -30)
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0.set_fft_average(0.1)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0.disable_legend()
if float == type(float()):
self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not False)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.notebook_top_grid_layout_1.addWidget(self._qtgui_freq_sink_x_0_0_win, 0, 1, 1, 1)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
fm_station * 1e6, #fc
samp_rate, #bw
"RF Frequency", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.10)
self.qtgui_freq_sink_x_0.set_y_axis(-90, 0)
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(0.1)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if complex == type(float()):
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ["", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.notebook_top_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_win, 0, 1, 1, 1)
self.qtgui_const_sink_x_0 = qtgui.const_sink_c(
1024, #size
"RDS BPSK Constellation", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0.set_update_time(0.10)
self.qtgui_const_sink_x_0.set_y_axis(-1.6, 1.6)
self.qtgui_const_sink_x_0.set_x_axis(-1.6, 1.6)
self.qtgui_const_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0.enable_autoscale(False)
self.qtgui_const_sink_x_0.enable_grid(True)
if not False:
self.qtgui_const_sink_x_0.disable_legend()
labels = ["RBDS BPSK", "", "", "", "",
"", "", "", "", ""]
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0.pyqwidget(), Qt.QWidget)
self.notebook_rds_grid_layout_0.addWidget(self._qtgui_const_sink_x_0_win, 0, 1, 1, 1)
self.low_pass_filter_4 = filter.fir_filter_fff(1, firdes.low_pass(
1, baseband_rate, 60e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_2 = filter.fir_filter_fff(1, firdes.low_pass(
1, baseband_rate, 16e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_1 = filter.fir_filter_fff(1, firdes.low_pass(
10, baseband_rate, 15e3, 3e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0 = filter.fir_filter_ccf(baseband_decimation, firdes.low_pass(
1, samp_rate, 75e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.gr_rds_parser_0 = rds.parser(False, False, 1)
self.gr_rds_decoder_0 = rds.decoder(False, False)
self.freq_xlating_fir_filter_xxx_1 = filter.freq_xlating_fir_filter_fcc(rds_dec, (firdes.low_pass(2500,baseband_rate,rds_bandwidth,1e3,firdes.WIN_HAMMING)), rds_subcarrier, baseband_rate)
self.digital_mpsk_receiver_cc_0 = digital.mpsk_receiver_cc(2, 0, (2 * cmath.pi) / 100, -0.00006, 0.00006, 0.5, 0.05, rds_samp_rate / (rds_bitrate * 2), ((rds_samp_rate / (rds_bitrate * 2)) ** 2)/ 4, 0.005)
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.blocks_uchar_to_float_0_0 = blocks.uchar_to_float()
self.blocks_uchar_to_float_0 = blocks.uchar_to_float()
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_multiply_xx_1 = blocks.multiply_vff(1)
self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
self.blocks_multiply_const_vxx_1_0_1_0_1 = blocks.multiply_const_vff((11, ))
self.blocks_multiply_const_vxx_1_0_1_0_0 = blocks.multiply_const_vff((11, ))
self.blocks_multiply_const_vxx_1_0_1_0 = blocks.multiply_const_vff((11, ))
self.blocks_multiply_const_vxx_1_0_1 = blocks.multiply_const_vff((slider_volume, ))
self.blocks_multiply_const_vxx_1_0_0 = blocks.multiply_const_vff((slider_volume, ))
self.blocks_multiply_const_vxx_1_0 = blocks.multiply_const_vff((slider_volume, ))
self.blocks_keep_one_in_n_0 = blocks.keep_one_in_n(gr.sizeof_char*1, 2)
self.blocks_complex_to_real_1 = blocks.complex_to_real(1)
self.blocks_complex_to_real_0 = blocks.complex_to_real(1)
self.blocks_add_xx_0 = blocks.add_vff(1)
self.blocks_add_const_vxx_0_0 = blocks.add_const_vff((0.5, ))
self.blocks_add_const_vxx_0 = blocks.add_const_vff((-1.5, ))
self.blks2_selector_0_0 = grc_blks2.selector(
item_size=gr.sizeof_float*1,
num_inputs=2,
num_outputs=1,
input_index=0,
output_index=0,
)
self.blks2_selector_0 = grc_blks2.selector(
item_size=gr.sizeof_float*1,
num_inputs=2,
num_outputs=1,
input_index=0,
output_index=0,
)
self.band_pass_filter_1 = filter.fir_filter_fff(1, firdes.band_pass(
1, baseband_rate, stereo_subcarrier - 0.5e3, stereo_subcarrier + 0.5e3, 0.5e3, firdes.WIN_HAMMING, 6.76))
self.band_pass_filter_0_0 = filter.fir_filter_fff(1, firdes.band_pass(
1, baseband_rate, 23e3, 53e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.band_pass_filter_0 = filter.fir_filter_fcc(1, firdes.complex_band_pass(
1, baseband_rate, pilot_tone - 0.5e3, pilot_tone+0.5e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.audio_sink_0 = audio.sink(48000, "", True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=baseband_rate,
audio_decimation=1,
)
self.analog_pll_refout_cc_0 = analog.pll_refout_cc(1e-3, 2 * cmath.pi * (19000+200) / baseband_rate, 2 * cmath.pi * (19000-200) / baseband_rate)
self.analog_fm_deemph_0_0_0_1 = analog.fm_deemph(fs=baseband_rate, tau=75e-6)
self.analog_fm_deemph_0_0_0_0 = analog.fm_deemph(fs=baseband_rate, tau=75e-6)
self.analog_fm_deemph_0_0_0 = analog.fm_deemph(fs=baseband_rate, tau=75e-6)
##################################################
# Connections
##################################################
self.msg_connect((self.gr_rds_decoder_0, 'out'), (self.gr_rds_parser_0, 'in'))
self.msg_connect((self.gr_rds_parser_0, 'out'), (self.rds_qt_panel_0, 'in'))
self.connect((self.analog_fm_deemph_0_0_0, 0), (self.rational_resampler_xxx_0_0_0, 0))
self.connect((self.analog_fm_deemph_0_0_0_0, 0), (self.rational_resampler_xxx_0_0_0_0, 0))
self.connect((self.analog_fm_deemph_0_0_0_1, 0), (self.rational_resampler_xxx_0_0_0_1, 0))
self.connect((self.analog_pll_refout_cc_0, 0), (self.blocks_complex_to_real_1, 0))
self.connect((self.analog_wfm_rcv_0, 0), (self.low_pass_filter_4, 0))
self.connect((self.band_pass_filter_0, 0), (self.analog_pll_refout_cc_0, 0))
self.connect((self.band_pass_filter_0_0, 0), (self.blocks_multiply_xx_1, 1))
self.connect((self.band_pass_filter_1, 0), (self.blocks_multiply_xx_1, 0))
self.connect((self.band_pass_filter_1, 0), (self.qtgui_freq_sink_x_0_1_0_0, 1))
self.connect((self.blks2_selector_0, 0), (self.audio_sink_0, 0))
self.connect((self.blks2_selector_0_0, 0), (self.audio_sink_0, 1))
self.connect((self.blocks_add_const_vxx_0, 0), (self.qtgui_time_sink_x_1, 1))
self.connect((self.blocks_add_const_vxx_0_0, 0), (self.qtgui_time_sink_x_1, 0))
self.connect((self.blocks_add_xx_0, 0), (self.analog_fm_deemph_0_0_0_0, 0))
self.connect((self.blocks_complex_to_real_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.blocks_complex_to_real_1, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.blocks_complex_to_real_1, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_complex_to_real_1, 0), (self.qtgui_freq_sink_x_0_1_0_0, 0))
self.connect((self.blocks_complex_to_real_1, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_keep_one_in_n_0, 0), (self.blocks_uchar_to_float_0, 0))
self.connect((self.blocks_keep_one_in_n_0, 0), (self.digital_diff_decoder_bb_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0, 0), (self.blks2_selector_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0, 0), (self.qtgui_freq_sink_x_0_1_0_1, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0, 0), (self.blks2_selector_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0, 0), (self.qtgui_freq_sink_x_0_1_0_1_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1, 0), (self.blks2_selector_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1, 0), (self.blks2_selector_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1, 0), (self.qtgui_freq_sink_x_0_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0, 0), (self.qtgui_freq_sink_x_0_1_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_1, 0), (self.qtgui_freq_sink_x_0_1_0_1_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.band_pass_filter_1, 0))
self.connect((self.blocks_multiply_xx_1, 0), (self.low_pass_filter_2, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.analog_fm_deemph_0_0_0, 0))
self.connect((self.blocks_uchar_to_float_0, 0), (self.blocks_add_const_vxx_0_0, 0))
self.connect((self.blocks_uchar_to_float_0_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.blocks_keep_one_in_n_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_uchar_to_float_0_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.gr_rds_decoder_0, 0))
self.connect((self.digital_mpsk_receiver_cc_0, 0), (self.blocks_complex_to_real_0, 0))
self.connect((self.digital_mpsk_receiver_cc_0, 0), (self.qtgui_const_sink_x_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_1, 0), (self.qtgui_freq_sink_x_1, 0))
self.connect((self.freq_xlating_fir_filter_xxx_1, 0), (self.root_raised_cosine_filter_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.low_pass_filter_1, 0), (self.analog_fm_deemph_0_0_0_1, 0))
self.connect((self.low_pass_filter_1, 0), (self.blocks_add_xx_0, 0))
self.connect((self.low_pass_filter_1, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.low_pass_filter_2, 0), (self.blocks_add_xx_0, 1))
self.connect((self.low_pass_filter_2, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.low_pass_filter_4, 0), (self.band_pass_filter_0, 0))
self.connect((self.low_pass_filter_4, 0), (self.band_pass_filter_0_0, 0))
self.connect((self.low_pass_filter_4, 0), (self.freq_xlating_fir_filter_xxx_1, 0))
self.connect((self.low_pass_filter_4, 0), (self.low_pass_filter_1, 0))
self.connect((self.low_pass_filter_4, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0, 0))
self.connect((self.rational_resampler_xxx_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0_1_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0_0, 0))
self.connect((self.rational_resampler_xxx_0_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0_1_0_1, 0))
self.connect((self.rational_resampler_xxx_0_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_1, 0))
self.connect((self.rational_resampler_xxx_0_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_1_0, 0))
self.connect((self.root_raised_cosine_filter_0, 0), (self.digital_mpsk_receiver_cc_0, 0))
self.connect((self.root_raised_cosine_filter_0, 0), (self.qtgui_freq_sink_x_1, 1))
self.connect((self.rtlsdr_source_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.rtlsdr_source_0, 0), (self.qtgui_freq_sink_x_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "fm_radio")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_valid_gains(self):
return self.valid_gains
def set_valid_gains(self, valid_gains):
self.valid_gains = valid_gains
self.set_rf_gain(len(self.valid_gains)-1)
self.set_sdr_gain(self.valid_gains[self.rf_gain])
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_baseband_rate(self.samp_rate // self.baseband_decimation)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 75e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.qtgui_freq_sink_x_0.set_frequency_range(self.fm_station * 1e6, self.samp_rate)
self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate)
self.rtlsdr_source_0.set_sample_rate(self.samp_rate)
def get_baseband_decimation(self):
return self.baseband_decimation
def set_baseband_decimation(self, baseband_decimation):
self.baseband_decimation = baseband_decimation
self.set_baseband_rate(self.samp_rate // self.baseband_decimation)
def get_rf_gain(self):
return self.rf_gain
def set_rf_gain(self, rf_gain):
self.rf_gain = rf_gain
self.set_sdr_gain(self.valid_gains[self.rf_gain])
def get_rds_dec(self):
return self.rds_dec
def set_rds_dec(self, rds_dec):
self.rds_dec = rds_dec
self.set_rds_samp_rate(self.baseband_rate / self.rds_dec)
def get_pilot_tone(self):
return self.pilot_tone
def set_pilot_tone(self, pilot_tone):
self.pilot_tone = pilot_tone
self.set_rds_subcarrier(self.pilot_tone * 3)
self.set_stereo_subcarrier(self.pilot_tone * 2)
self.band_pass_filter_0.set_taps(firdes.complex_band_pass(1, self.baseband_rate, self.pilot_tone - 0.5e3, self.pilot_tone+0.5e3, 1e3, firdes.WIN_HAMMING, 6.76))
def get_baseband_rate(self):
return self.baseband_rate
def set_baseband_rate(self, baseband_rate):
self.baseband_rate = baseband_rate
self.set_rds_samp_rate(self.baseband_rate / self.rds_dec)
self.analog_pll_refout_cc_0.set_max_freq(2 * cmath.pi * (19000+200) / self.baseband_rate)
self.analog_pll_refout_cc_0.set_min_freq(2 * cmath.pi * (19000-200) / self.baseband_rate)
self.band_pass_filter_0.set_taps(firdes.complex_band_pass(1, self.baseband_rate, self.pilot_tone - 0.5e3, self.pilot_tone+0.5e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.band_pass_filter_0_0.set_taps(firdes.band_pass(1, self.baseband_rate, 23e3, 53e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.band_pass_filter_1.set_taps(firdes.band_pass(1, self.baseband_rate, self.stereo_subcarrier - 0.5e3, self.stereo_subcarrier + 0.5e3, 0.5e3, firdes.WIN_HAMMING, 6.76))
self.freq_xlating_fir_filter_xxx_1.set_taps((firdes.low_pass(2500,self.baseband_rate,self.rds_bandwidth,1e3,firdes.WIN_HAMMING)))
self.low_pass_filter_1.set_taps(firdes.low_pass(10, self.baseband_rate, 15e3, 3e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_2.set_taps(firdes.low_pass(1, self.baseband_rate, 16e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_4.set_taps(firdes.low_pass(1, self.baseband_rate, 60e3, 1e3, firdes.WIN_HAMMING, 6.76))
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.baseband_rate)
self.qtgui_freq_sink_x_0_1_0_0.set_frequency_range(0, self.baseband_rate)
self.qtgui_time_sink_x_0.set_samp_rate(self.baseband_rate)
def get_stereo_subcarrier(self):
return self.stereo_subcarrier
def set_stereo_subcarrier(self, stereo_subcarrier):
self.stereo_subcarrier = stereo_subcarrier
self.band_pass_filter_1.set_taps(firdes.band_pass(1, self.baseband_rate, self.stereo_subcarrier - 0.5e3, self.stereo_subcarrier + 0.5e3, 0.5e3, firdes.WIN_HAMMING, 6.76))
def get_stereo_button(self):
return self.stereo_button
def set_stereo_button(self, stereo_button):
self.stereo_button = stereo_button
self._stereo_button_callback(self.stereo_button)
def get_slider_volume(self):
return self.slider_volume
def set_slider_volume(self, slider_volume):
self.slider_volume = slider_volume
self.blocks_multiply_const_vxx_1_0.set_k((self.slider_volume, ))
self.blocks_multiply_const_vxx_1_0_0.set_k((self.slider_volume, ))
self.blocks_multiply_const_vxx_1_0_1.set_k((self.slider_volume, ))
def get_sdr_gain(self):
return self.sdr_gain
def set_sdr_gain(self, sdr_gain):
self.sdr_gain = sdr_gain
self.rtlsdr_source_0.set_gain(self.sdr_gain, 0)
def get_rds_symbols_per_bit(self):
return self.rds_symbols_per_bit
def set_rds_symbols_per_bit(self, rds_symbols_per_bit):
self.rds_symbols_per_bit = rds_symbols_per_bit
self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(2, self.rds_samp_rate, self.rds_bitrate * self.rds_symbols_per_bit, 0.275, 16))
def get_rds_subcarrier(self):
return self.rds_subcarrier
def set_rds_subcarrier(self, rds_subcarrier):
self.rds_subcarrier = rds_subcarrier
self.freq_xlating_fir_filter_xxx_1.set_center_freq(self.rds_subcarrier)
def get_rds_samp_rate(self):
return self.rds_samp_rate
def set_rds_samp_rate(self, rds_samp_rate):
self.rds_samp_rate = rds_samp_rate
self.digital_mpsk_receiver_cc_0.set_omega(self.rds_samp_rate / (self.rds_bitrate * 2))
self.digital_mpsk_receiver_cc_0.set_gain_omega(((self.rds_samp_rate / (self.rds_bitrate * 2)) ** 2)/ 4)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.rds_samp_rate)
self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(2, self.rds_samp_rate, self.rds_bitrate * self.rds_symbols_per_bit, 0.275, 16))
def get_rds_bitrate(self):
return self.rds_bitrate
def set_rds_bitrate(self, rds_bitrate):
self.rds_bitrate = rds_bitrate
self.digital_mpsk_receiver_cc_0.set_omega(self.rds_samp_rate / (self.rds_bitrate * 2))
self.digital_mpsk_receiver_cc_0.set_gain_omega(((self.rds_samp_rate / (self.rds_bitrate * 2)) ** 2)/ 4)
self.root_raised_cosine_filter_0.set_taps(firdes.root_raised_cosine(2, self.rds_samp_rate, self.rds_bitrate * self.rds_symbols_per_bit, 0.275, 16))
def get_rds_bandwidth(self):
return self.rds_bandwidth
def set_rds_bandwidth(self, rds_bandwidth):
self.rds_bandwidth = rds_bandwidth
self.freq_xlating_fir_filter_xxx_1.set_taps((firdes.low_pass(2500,self.baseband_rate,self.rds_bandwidth,1e3,firdes.WIN_HAMMING)))
def get_fm_station(self):
return self.fm_station
def set_fm_station(self, fm_station):
self.fm_station = fm_station
self.qtgui_freq_sink_x_0.set_frequency_range(self.fm_station * 1e6, self.samp_rate)
self.rds_qt_panel_0.set_frequency(float(self.fm_station))
self.rtlsdr_source_0.set_center_freq(self.fm_station * 1e6, 0)
def get_fm_broadcast_seperation(self):
return self.fm_broadcast_seperation
def set_fm_broadcast_seperation(self, fm_broadcast_seperation):
self.fm_broadcast_seperation = fm_broadcast_seperation
def get_fm_broadcast_low(self):
return self.fm_broadcast_low
def set_fm_broadcast_low(self, fm_broadcast_low):
self.fm_broadcast_low = fm_broadcast_low
def get_fm_broadcast_high(self):
return self.fm_broadcast_high
def set_fm_broadcast_high(self, fm_broadcast_high):
self.fm_broadcast_high = fm_broadcast_high
def get_audio_rate(self):
return self.audio_rate
def set_audio_rate(self, audio_rate):
self.audio_rate = audio_rate
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.audio_rate)
self.qtgui_freq_sink_x_0_1_0_1.set_frequency_range(0, self.audio_rate)
self.qtgui_freq_sink_x_0_1_0_1_0.set_frequency_range(0, self.audio_rate)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
Qt.QApplication.setGraphicsSystem(gr.prefs().get_string('qtgui','style','raster'))
qapp = Qt.QApplication(sys.argv)
tb = fm_radio()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
tb = None # to clean up Qt widgets
|
SpinStabilized/fm_radio
|
fm_radio.py
|
Python
|
gpl-3.0
| 50,200
|
[
"Brian"
] |
76a3e1629042f032f55bed5eee5946436a1c183cb00547ab0146bf4cc1d0ef51
|
"""
Initializers
"""
import numpy as np
import theano
from blocks.initialization import NdarrayInitialization
class GlorotUniform(NdarrayInitialization):
"""Initialize parameters from an isotropic Gaussian distribution.
Parameters
----------
std : float, optional
The standard deviation of the Gaussian distribution. Defaults to 1.
mean : float, optional
The mean of the Gaussian distribution. Defaults to 0
Notes
-----
Be careful: the standard deviation goes first and the mean goes
second!
"""
def __init__(self):
pass
def generate(self, rng, shape):
if len(shape) == 1:
return rng.uniform(size=shape, low=-0.00001, high=0.00001).astype(theano.config.floatX)
if not len(shape) == 2:
raise NotImplementedError("GlorotUniform doesnt work for " + str(shape) + " shape")
fan_in, fan_out = shape[0], shape[1]
s = np.sqrt(6. / (fan_in + fan_out))
return rng.uniform(size=shape, low=-s, high=s).astype(theano.config.floatX)
def __repr__(self):
return "GlorotUniform"
|
tombosc/dict_based_learning
|
dictlearn/inits.py
|
Python
|
mit
| 1,120
|
[
"Gaussian"
] |
299bf04a21a2a5a77f4c314438016dd12cdef37a54042e9ce400270d1ad1a3b7
|
from sword2 import HttpLayer, HttpResponse
from octopus.lib import http
import json
from requests.auth import HTTPBasicAuth
class OctopusHttpResponse(HttpResponse):
def __init__(self, *args, **kwargs):
self.resp = None
if len(args) > 0:
self.resp = args[0]
def __getitem__(self, att):
return self.get(att)
def __repr__(self):
return self.resp.__repr__()
@property
def status(self):
if self.resp is None:
return 408 # timeout
return self.resp.status_code
def get(self, att, default=None):
if att == "status":
return self.status
if self.resp is None:
return default
return self.resp.headers.get(att, default)
def keys(self):
return self.resp.headers.keys()
class OctopusHttpLayer(HttpLayer):
def __init__(self, *args, **kwargs):
self.username = None
self.password = None
self.auth = None
def add_credentials(self, username, password):
self.username = username
self.password = password
self.auth = HTTPBasicAuth(username, password)
def request(self, uri, method, headers=None, payload=None): # Note that body can be file-like
resp = None
if method == "GET":
resp = http.get(uri, headers=headers, auth=self.auth)
elif method == "POST":
resp = http.post(uri, headers=headers, data=payload, auth=self.auth)
elif method == "PUT":
resp = http.put(uri, headers=headers, data=payload, auth=self.auth)
elif method == "DELETE":
resp = http.delete(uri, headers=headers, auth=self.auth)
if resp is None:
return OctopusHttpResponse(), u""
return OctopusHttpResponse(resp), resp.text
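if __name__ == "__main__":
    # Usage sketch: wire up basic-auth credentials and issue a GET through the
    # layer defined above. The URL and credentials are placeholders; running
    # this needs octopus.lib.http (imported above) and network access.
    layer = OctopusHttpLayer()
    layer.add_credentials("username", "password")
    resp, body = layer.request("https://example.org/sword2/service-document", "GET")
    print(resp.status)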
|
JiscPER/magnificent-octopus
|
octopus/modules/swordv2/client_http.py
|
Python
|
apache-2.0
| 1,816
|
[
"Octopus"
] |
467a08682df310f9300d5a34951feeaa048f209168c6977e8c5db44c9793ddac
|
"""
Tests the converters
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import unittest
import pysam
import ga4gh.backend as backend
import ga4gh.client as client
import ga4gh.converters as converters
class TestSamConverter(unittest.TestCase):
"""
Tests for the GA4GH reads API -> SAM conversion.
"""
def setUp(self):
self._backend = backend.FileSystemBackend("tests/data")
self._client = client.LocalClient(self._backend)
def verifySamRecordsEqual(self, sourceReads, convertedReads):
"""
Verify that a read from pysam matches a read from the reference server
"""
self.assertEqual(len(sourceReads), len(convertedReads))
for source, converted in zip(sourceReads, convertedReads):
self.assertEqual(source.query_name, converted.query_name)
self.assertEqual(source.query_sequence, converted.query_sequence)
self.assertEqual(source.flag, converted.flag)
self.assertEqual(source.reference_id, converted.reference_id)
self.assertEqual(
source.mapping_quality,
converted.mapping_quality)
self.assertEqual(
source.template_length,
converted.template_length)
self.assertEqual(
source.query_qualities, converted.query_qualities)
# TODO the below fields can not be tested since we don't
# encode them in the case that either the read is not mapped
# or the read pair is not mapped
# self.assertEqual(
# source.reference_start,
# converted.reference_start)
# self.assertEqual(source.cigar, converted.cigar)
# self.assertEqual(
# source.next_reference_id,
# converted.next_reference_id)
# self.assertEqual(
# source.next_reference_start,
# converted.next_reference_start)
# TODO can't uncomment until round trip tags are fixed;
# see schemas issue 758
# self.assertEqual(
# source.tags,
# converted.tags)
def verifyFullConversion(self, readGroupSet, readGroup, reference):
"""
Verify that the conversion of the specified readGroup in the
specified readGroupSet for the specified reference is correct.
This involves pulling out the reads from the original BAM file
and comparing these with the converted SAM records.
"""
with tempfile.NamedTemporaryFile() as fileHandle:
converter = converters.SamConverter(
self._client, readGroup.getId(), reference.getId(),
outputFileName=fileHandle.name)
converter.convert()
samFile = pysam.AlignmentFile(fileHandle.name, "r")
try:
convertedReads = list(samFile.fetch())
finally:
samFile.close()
samFile = pysam.AlignmentFile(
readGroupSet.getSamFilePath(), "rb")
try:
sourceReads = []
referenceName = reference.getName().encode()
readGroupName = readGroup.getLocalId().encode()
for readAlignment in samFile.fetch(referenceName):
tags = dict(readAlignment.tags)
if 'RG' in tags and tags['RG'] == readGroupName:
sourceReads.append(readAlignment)
finally:
samFile.close()
self.verifySamRecordsEqual(sourceReads, convertedReads)
def testSamConversion(self):
datasets = self._backend.getDatasets()
for dataset in datasets:
readGroupSets = dataset.getReadGroupSets()
for readGroupSet in readGroupSets:
referenceSet = readGroupSet.getReferenceSet()
for reference in referenceSet.getReferences():
for readGroup in readGroupSet.getReadGroups():
self.verifyFullConversion(
readGroupSet, readGroup, reference)
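# To run only this module with the standard unittest runner (module path taken
# from the repository layout), something like:
#   python -m unittest tests.unit.test_converters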
|
diekhans/ga4gh-server
|
tests/unit/test_converters.py
|
Python
|
apache-2.0
| 4,241
|
[
"pysam"
] |
15aaf3474627ea95fc3c9521021358d1948376ceebb94b300cfd0cf85a0870f3
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fpocket(MakefilePackage):
"""fpocket is a very fast open source protein pocket detection algorithm
based on Voronoi tessellation."""
homepage = "https://github.com/Discngine/fpocket"
version('master', branch='master',
git='https://github.com/Discngine/fpocket.git')
depends_on("netcdf-c")
def setup_build_environment(self, env):
if self.compiler.name == 'gcc':
env.set('CXX', 'g++')
def edit(self, spec, prefix):
makefile = FileFilter('makefile')
makefile.filter('BINDIR .*', 'BINDIR = %s/bin' % self.prefix)
makefile.filter('MANDIR .*', 'MANDIR = %s/man/man8' % self.prefix)
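# Once this package file is on a Spack repository path, it can be built with
# the standard Spack CLI, e.g.:
#   spack install fpocket@master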
|
iulian787/spack
|
var/spack/repos/builtin/packages/fpocket/package.py
|
Python
|
lgpl-2.1
| 879
|
[
"NetCDF"
] |
510254068062a27e7f5e3997b3f8b065b3452561ad1fc08ccd9a7d68a4b6df88
|
'''
Created on Feb 5, 2017
@author: Jake
'''
from Neuron import *
from Connection import *
import random
import numpy as np
import Draw_Test
import Population
class Individual(object):
genome = {}
inputNodes = []
hiddenNodes = []
outputNodes = []
neuronList = []
fitness = 0.0
ID = 0
def __init__(self):
self.genome = {}
self.inputNodes = []
self.inputIDS = []
self.outputIDS = []
self.hiddenNodes = []
self.outputNodes = []
self.neuronList = []
self.fitness = 0
self.ID = Population.CURR_IND_ID
Population.CURR_IND_ID+=1
self.initialize()
def initialize(self):
hiddenNum = np.random.randint(1,Population.INPUT_NUM*2)
#Think about neuronList at some point
for x in range(Population.INPUT_NUM):
n0=Neuron()
n0.neuronType ="input"
n0.neuronID=x
self.inputNodes.append(n0)
self.inputIDS.append(x)
for x in range(Population.OUTPUT_NUM):
n1 = Neuron()
n1.neuronType ='output'
n1.neuronID=x
self.outputNodes.append(n1)
self.outputIDS.append(x)
for x in range(hiddenNum):
n2 = Neuron()
n2.neuronType = 'hidden'
self.hiddenNodes.append(n2)
for i in self.inputNodes:
for h in self.hiddenNodes:
c = Connection(Population.CONNECTION_ID, i, h, np.random.uniform(Population.START_WEIGHT*2)-1)
self.genome[Population.CONNECTION_ID] = c
Population.CONNECTION_ID+=1
i.outCons.append(c)
h.inCons.append(c)
if i.neuronID not in self.inputIDS:
self.inputNodes.append(i)
self.neuronList.append(i)
self.inputIDS.append(i.neuronID)
for h in self.hiddenNodes:
for o in self.outputNodes:
c = Connection(Population.CONNECTION_ID, h, o, np.random.uniform())
self.genome[Population.CONNECTION_ID] = c
Population.CONNECTION_ID+=1
h.outCons.append(c)
o.inCons.append(c)
if o.neuronID not in self.outputIDS:
self.outputNodes.append(o)
self.neuronList.append(o)
self.outputIDS.append(o.neuronID)
def mutateConnection(self,sNeuron = None,eNeuron = None):
if sNeuron == None:
iLayer = [self.inputNodes, self.hiddenNodes][random.getrandbits(1)]
oLayer = [self.hiddenNodes, self.outputNodes][random.getrandbits(1)]
sNeuron = iLayer[np.random.randint(0,len(iLayer))]
eNeuron = oLayer[np.random.randint(0,len(oLayer))]
w = random.uniform(-Population.START_WEIGHT, Population.START_WEIGHT)
newC = Connection(Population.CONNECTION_ID, sNeuron, eNeuron, w)
self.genome[Population.CONNECTION_ID] = newC
Population.CONNECTION_ID+=1
sNeuron.outCons.append(newC)
eNeuron.inCons.append(newC)
def addStruct(self,connection):
inp=connection.inNeuron
out=connection.outNeuron
if inp.neuronType=="input" and inp.neuronID not in self.inputIDS:
self.inputNodes.append(inp)
self.neuronList.append(inp)
self.inputIDS.append(inp.neuronID)
if inp.neuronType=="hidden" and inp.neuronID not in self.genome:
self.hiddenNodes.append(inp)
self.neuronList.append(inp)
if out.neuronType=="hidden" and out.neuronID not in self.genome:
self.hiddenNodes.append(out)
self.neuronList.append(out)
if out.neuronType=="output" and out.neuronID not in self.outputIDS:
self.outputNodes.append(out)
self.neuronList.append(out)
self.outputIDS.append(out.neuronID)
copy=connection.copy()
if random.randint(0,99) <= Population.MUTATION_WEIGHT_RATE:
copy.mutateWeight()
self.genome[connection.ID]=copy
def mutateAddNeuron(self):
iLayer = [self.inputNodes, self.hiddenNodes][random.getrandbits(1)]
oLayer = [self.hiddenNodes, self.outputNodes][random.getrandbits(1)]
startNeuron = iLayer[np.random.randint(0,len(iLayer))]
endNeuron = oLayer[np.random.randint(0,len(oLayer))]
newNeuron = Neuron()
self.mutateConnection(startNeuron,newNeuron)
self.mutateConnection(newNeuron,endNeuron)
self.hiddenNodes.append(newNeuron)
self.neuronList.append(newNeuron)
def setInputs(self, inputList):
for i in range(len(self.inputNodes)):
node = self.inputNodes[i]
node.value = inputList[i]
def calcOut(self,sigmoid=True):
assert(self.inputNodes[0].value!=None)
outList=[]
index=0
for output in self.outputNodes:
outList.append(output.neuralNet(sigmoid))
index+=1
return outList
def __contains__(self, l):
for i in l:
if self.equals(i): return True
return False
def equals(self, other):
if cmp(self.genome, other.genome)!=0 : return False
# if set(self.inputNodes) != set(other.inputNodes) : return False
# if set(self.hiddenNodes) != set(other.hiddenNodes) : return False
# if set(self.outputNodes) != set(other.outputNodes) : return False
# if set(self.neuronList) != set(other.neuronList) : return False
# if set(self.fitness) != set(other.fitness) : return False
print "EQUAL"
return True
def __str__(self):
return """
Inputs: %d
Hidden: %d
Output: %d
Number Connections: %d
"""% (len(self.inputNodes), len(self.hiddenNodes), len(self.outputNodes), len(self.genome))
def drawNet(self):
vertical_distance_between_layers = 6
horizontal_distance_between_neurons = 2
neuron_radius = 0.5
number_of_neurons_in_widest_layer = 4
network = Draw_Test.NeuralNetwork()
# weights to convert from 10 outputs to 4 (decimal digits to their binary representation)
inputWeights = np.array([[self.inputNodes[i].outCons[i].weight for i in range(len(self.inputNodes))] for j in range(len(self.hiddenNodes[0]))])
network.add_layer(len(self.inputNodes),inputWeights,'input')
for x in range(len(self.hiddenNodes)-1):
hiddenWeights = np.array([[self.hiddenNodes[x][i].outCons[i].weight for i in range(len(self.hiddenNodes[x]))] for j in range(len(self.hiddenNodes[x+1]))])
network.add_layer(len(self.hiddenNodes[x]),hiddenWeights,'hidden')
finalHiddenWeights = np.array([[self.hiddenNodes[-1][i].outCons[i].weight for i in range(len(self.hiddenNodes[-1]))] for j in range(len(self.outputNodes))])
network.add_layer(len(self.hiddenNodes[-1]),finalHiddenWeights,'hidden')
network.add_layer(len(self.outputNodes),'output')
network.draw()
def traverseConstruct(self, nodes):
for node in nodes:
self.neuronSet.add(node)
if node.neuronType =='input': self.inputNodes.append(node)
if node.neuronType =='output': self.outputNodes.append(node)
print self.inputNodes
for n in self.outputNodes:
layer = 0
for c in n.inCons:
n = c.outNeuron
while n.neuronType == 'hidden':
self.hiddenNodes.append([])
for c in n.inCons:
n = c.outNeuron
self.hiddenNodes[layer].append(c.outNeuron)
layer += 1
print self.hiddenNodes
if __name__ == '__main__':
i1= Individual()
i2= Individual()
i3= Individual()
print i1, i2, i3
# n0=Neuron()
# n1=Neuron()
# n2=Neuron()
# n3=Neuron()
# n4=Neuron()
# n0.neuronType="input"
# n1.neuronType="input"
# n2.neuronType="hidden"
# n3.neuronType="hidden"
# n4.neuronType="output"
# c0=Connection(0,n0,n2,.1,True)
# c1=Connection(1,n0,n3,.05,True)
# c2=Connection(2,n1,n2,5,True)
# c3=Connection(3,n1,n3,.7,True)
# c4=Connection(4,n2,n4,.9,True)
# c5=Connection(5,n3,n4,.8,True)
# n0.outCons.append(c0)
# n0.outCons.append(c1)
# n1.outCons.append(c2)
# n1.outCons.append(c3)
# n2.outCons.append(c4)
# n3.outCons.append(c4)
# n2.inCons.append(c0)
# n2.inCons.append(c1)
# n3.inCons.append(c0)
# n3.inCons.append(c1)
# n4.inCons.append(c2)
# n4.inCons.append(c3)
# n0.value= .001
# n1.value= .0000000005
# # i = Individual()
# # # i.traverseConstruct([n0,n1,n2,n3,n4])
# # i.drawNet()
# print n4.neuralNet(sigmoid=True)
|
JakeShulman/G-Neat
|
Individual.py
|
Python
|
apache-2.0
| 7,797
|
[
"NEURON"
] |
a5d6bdfa52cf2df94a2f6b997261d2108ccda55579bcc5775da8637abed965d3
|
import networkx as nx
import numpy as np
class Skeleton(object):
def __init__(self, skeleton_id, graph ):
self.skeleton_id = skeleton_id
self.graph = graph
def get_skeleton(connection, skeleton_id):
""" Fetch a skeleton from the database and return it as a Skeleton object containing
a NetworkX graph for the morphology as nodes, edges between the skeleton nodes,
and a mapping between node id and tag, and a mapping of all associated connectors. """
d = connection.fetch('{0}/1/1/compact-skeleton'.format(skeleton_id))
# create a graph representation of the skeleton
g = nx.DiGraph()
for treenode in d[0]:
if treenode[1]:
# Will create the nodes when not existing yet
g.add_edge(treenode[1], treenode[0], {'confidence': treenode[7]})
else:
# The root node
g.add_node(treenode[0])
properties = g.node[treenode[0]]
properties['user_id'] = treenode[2]
properties['radius'] = treenode[6]
properties['location'] = np.array([treenode[3], treenode[4], treenode[5]], dtype=np.float32)
    # tags map tag text to lists of treenode IDs; add them to the skeleton graph nodes as a property
for tag, treenodes in d[2].iteritems():
for treenode_id in treenodes:
            # store tags as node attributes, matching the g.node[...] access used above
            tags = g.node[treenode_id].get('tags')
            if tags:
                tags.append(tag)
            else:
                g.node[treenode_id]['tags'] = [tag]
# convert connectors into a lookup table
connectors = {}
relations = {0: 'presynaptic_to',
1: 'postsynaptic_to'}
for connector in d[1]:
treenode_id = connector[0]
connector_id = connector[1]
relation = relations[connector[2]]
location = np.array( [connector[3], connector[4], connector[5]], dtype = np.float32 )
if connector_id in connectors:
connectors[connector_id][relation].append( treenode_id )
else:
connectors[connector_id] = {'location': location}
connectors[connector_id]['presynaptic_to'] = []
connectors[connector_id]['postsynaptic_to'] = []
connectors[connector_id][relation].append( treenode_id )
# for each connector of the skeleton, retrieve all associated relations
# including their skeleton node id and skeleton id
postdata = ''
for i, connectorid in enumerate( map(str, connectors.keys() ) ):
postdata += '&connector_ids%5B' + str(i) + '%5D=' + str( connectorid )
results = connection.fetch('connector/skeletons', postdata)
connectordata = {}
if len( results ) != 0:
for conn in results:
connectordata[conn[0]] = conn[1]
# retrieve the name of the neuron and its id
neuron = connection.fetch('skeleton/{0}/neuronname'.format(skeleton_id))
skeleton = Skeleton(skeleton_id, g)
skeleton.root_node_id = find_root( g )
skeleton.neuronname = neuron['neuronname']
skeleton.neuron_id = neuron['neuronid']
skeleton.connectors = connectors
skeleton.connectordata = connectordata
return skeleton
def find_root(tree):
""" Search and return the first node that has zero predecessors.
Will be the root node in directed graphs.
Avoids one database lookup. """
for node in tree:
if not next(tree.predecessors_iter(node), None):
return node
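# --- Illustrative usage sketch (not part of the original module) ---
# find_root() walks a directed skeleton graph and returns the first node
# without predecessors, i.e. the root of the tree.  The demo below builds a
# tiny three-node tree with the networkx 1.x-style DiGraph this module
# targets (it relies on predecessors_iter, which was removed in networkx 2).
def _find_root_demo():
    g = nx.DiGraph()
    g.add_edge(1, 2)  # node 1 is the parent of node 2
    g.add_edge(2, 3)  # node 2 is the parent of node 3
    return find_root(g)  # -> 1, the only node with no predecessors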
|
catmaid/CAT
|
cat/morphology.py
|
Python
|
mit
| 3,392
|
[
"NEURON"
] |
93e357538f66217325f9c19f0aed164c8f45c68d0746650d58d1f1d6afb21578
|
# Copyright (C) 2010 CAMd
# Copyright (C) 2010 Argonne National Laboratory
# Please see the accompanying LICENSE file for further information.
import numpy as np
from gpaw.mpi import SerialCommunicator, serial_comm
from gpaw.matrix_descriptor import MatrixDescriptor, \
BandMatrixDescriptor, \
BlacsBandMatrixDescriptor
from blacs import BlacsGrid, Redistributor
from gpaw.utilities import uncamelcase
from gpaw.utilities.blas import gemm, r2k, gemmdot
from gpaw.utilities.lapack import diagonalize, general_diagonalize, \
inverse_cholesky
from gpaw.utilities.scalapack import pblas_simple_gemm, pblas_tran
from gpaw.utilities.tools import tri2full
from gpaw.utilities.timing import nulltimer
def get_KohnSham_layouts(sl, mode, gd, bd, dtype, **kwargs):
"""Create Kohn-Sham layouts object."""
# Not needed for AtomPAW special mode, as usual we just provide whatever
# happens to make the code not crash
if not isinstance(mode, str):
return None #XXX
name = {'fd': 'BandLayouts', 'lcao': 'OrbitalLayouts'}[mode]
args = (gd, bd, dtype)
if sl is not None:
name = 'Blacs' + name
assert len(sl) == 3
args += tuple(sl)
ksl = {'BandLayouts': BandLayouts,
'BlacsBandLayouts': BlacsBandLayouts,
'BlacsOrbitalLayouts': BlacsOrbitalLayouts,
'OrbitalLayouts': OrbitalLayouts,
}[name](*args, **kwargs)
if 0: #XXX debug
print('USING KSL: %s' % repr(ksl))
assert isinstance(ksl, KohnShamLayouts)
assert isinstance(ksl, BlacsLayouts) == (sl is not None)
return ksl
class KohnShamLayouts:
using_blacs = False # This is only used by a regression test
matrix_descriptor_class = None
def __init__(self, gd, bd, dtype, timer=nulltimer):
assert gd.comm.parent is bd.comm.parent # must have same parent comm
self.world = bd.comm.parent
self.gd = gd
self.bd = bd
self.dtype = dtype
# Columncomm contains all gd.comm.rank == 0, i.e. "grid-masters"
# Blockcomm contains all ranks with the same k-point or spin but
# different subdomains and band groups
bcommsize = self.bd.comm.size
gcommsize = self.gd.comm.size
shiftks = self.world.rank - self.world.rank % (bcommsize * gcommsize)
column_ranks = shiftks + np.arange(bcommsize) * gcommsize
block_ranks = shiftks + np.arange(bcommsize * gcommsize)
self.column_comm = self.world.new_communicator(column_ranks)
self.block_comm = self.world.new_communicator(block_ranks)
self.timer = timer
self._kwargs = {'timer': timer}
def get_keywords(self):
return self._kwargs.copy() # just a shallow copy...
def diagonalize(self, *args, **kwargs):
raise RuntimeError('Virtual member function should not be called.')
def inverse_cholesky(self, *args, **kwargs):
raise RuntimeError('Virtual member function should not be called.')
def new_descriptor(self):
return self.matrix_descriptor_class(self.bd, self.gd, self)
def __repr__(self):
return uncamelcase(self.__class__.__name__)
def get_description(self):
"""Description of this object in prose, e.g. for logging.
Subclasses are expected to override this with something useful."""
return repr(self)
class BlacsLayouts(KohnShamLayouts):
using_blacs = True # This is only used by a regression test
def __init__(self, gd, bd, dtype, mcpus, ncpus, blocksize,
timer=nulltimer):
KohnShamLayouts.__init__(self, gd, bd, dtype, timer)
# WARNING: Do not create the BlacsGrid on a communicator which does not
# contain block_comm.rank = 0. This will break BlacsBandLayouts which
# assume eps_M will be broadcast over block_comm.
self.blockgrid = BlacsGrid(self.block_comm, mcpus, ncpus)
def get_description(self):
title = 'BLACS'
template = '%d x %d grid with %d x %d blocksize'
return (title, template)
class BandLayouts(KohnShamLayouts):
matrix_descriptor_class = BandMatrixDescriptor
def __init__(self, gd, bd, dtype, buffer_size=None, timer=nulltimer):
KohnShamLayouts.__init__(self, gd, bd, dtype, timer)
self.buffer_size = buffer_size
def diagonalize(self, H_NN, eps_n):
"""Serial diagonalizer must handle two cases:
1. Parallelization over domains only.
2. Simultaneous parallelization over domains and bands.
"""
nbands = self.bd.nbands
mynbands = self.bd.mynbands
eps_N = np.empty(nbands)
self.timer.start('Diagonalize')
# Broadcast on block_comm since result
# is k-point and spin-dependent only
self.block_comm.broadcast(H_NN, 0)
self._diagonalize(H_NN, eps_N)
self.timer.stop('Diagonalize')
self.timer.start('Distribute results')
# Copy the portion that belongs to my band group
eps_n[:] = eps_N[self.bd.get_slice()]
self.timer.stop('Distribute results')
def _diagonalize(self, H_NN, eps_N):
"""Serial diagonalize via LAPACK."""
# This is replicated computation but ultimately avoids
# additional communication.
diagonalize(H_NN, eps_N)
def inverse_cholesky(self, S_NN):
"""Serial inverse Cholesky must handle two cases:
1. Parallelization over domains only.
2. Simultaneous parallelization over domains and bands.
"""
self.timer.start('Inverse Cholesky')
# Broadcast on block_comm since result
# is k-point and spin-dependent only
self.block_comm.broadcast(S_NN, 0)
self._inverse_cholesky(S_NN)
self.timer.stop('Inverse Cholesky')
def _inverse_cholesky(self, S_NN):
"""Serial inverse Cholesky via LAPACK."""
# This is replicated computation but ultimately avoids
# additional communication.
inverse_cholesky(S_NN)
def get_description(self):
return 'Serial LAPACK'
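# --- Illustrative sketch (not part of GPAW) ---
# BandLayouts.diagonalize() follows a simple pattern: diagonalize the full
# N x N Hamiltonian, then keep only the eigenvalues belonging to this rank's
# band slice.  The same idea, shown serially with plain numpy; the hard-coded
# slice stands in for self.bd.get_slice().
def _serial_band_diagonalize_demo():
    H_NN = np.array([[2.0, 1.0],
                     [1.0, 2.0]])
    eps_N = np.linalg.eigvalsh(H_NN)  # all eigenvalues, ascending: [1.0, 3.0]
    my_band_slice = slice(0, 1)       # pretend this rank owns band 0 only
    eps_n = eps_N[my_band_slice]
    return eps_n                      # array([1.0])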
class BlacsBandLayouts(BlacsLayouts): #XXX should derive from BandLayouts too!
"""ScaLAPACK Dense Linear Algebra.
This class is instantiated in the real-space code. Not for
casual use, at least for now.
Requires two distributors and three descriptors for initialization
as well as grid descriptors and band descriptors. Distributors are
for cols2blocks (1D -> 2D BLACS grid) and blocks2rows (2D -> 1D
BLACS grid). ScaLAPACK operations must occur on a 2D BLACS grid for
performance and scalability. Redistribute of 1D *column* layout
matrix will operate only on lower half of H or S. Redistribute of
2D block will operate on entire matrix for U, but only lower half
of C.
inverse_cholesky is "hard-coded" for real-space code.
Expects overlap matrix (S) and the coefficient matrix (C) to be a
replicated data structures and *not* created by the BLACS descriptor class.
This is due to the MPI_Reduce and MPI_Broadcast that will occur
in the parallel matrix multiply. Input matrices should be:
S = np.empty((nbands, mybands), dtype)
C = np.empty((mybands, nbands), dtype)
_standard_diagonalize is "hard-coded" for the real-space code.
Expects both hamiltonian (H) and eigenvector matrix (U) to be a
replicated data structures and not created by the BLACS descriptor class.
This is due to the MPI_Reduce and MPI_Broadcast that will occur
in the parallel matrix multiply. Input matrices should be:
H = np.empty((nbands, mynbands), dtype)
U = np.empty((mynbands, nbands), dtype)
eps_n = np.empty(mynbands, dtype = float)
""" #XXX rewrite this docstring a bit!
matrix_descriptor_class = BlacsBandMatrixDescriptor
# This class 'describes' all the realspace Blacs-related layouts
def __init__(self, gd, bd, dtype, mcpus, ncpus, blocksize,
buffer_size=None, timer=nulltimer):
BlacsLayouts.__init__(self, gd, bd, dtype, mcpus, ncpus, blocksize,
timer)
self.buffer_size = buffer_size
nbands = bd.nbands
mynbands = bd.mynbands
# 1D layout - columns
self.columngrid = BlacsGrid(self.column_comm, 1, bd.comm.size)
self.Nndescriptor = self.columngrid.new_descriptor(nbands, nbands,
nbands, mynbands)
# 2D layout
self.nndescriptor = self.blockgrid.new_descriptor(nbands, nbands,
blocksize, blocksize)
# 1D layout - rows
self.rowgrid = BlacsGrid(self.column_comm, bd.comm.size, 1)
self.nNdescriptor = self.rowgrid.new_descriptor(nbands, nbands,
mynbands, nbands)
# Only redistribute filled out half for Hermitian matrices
self.Nn2nn = Redistributor(self.block_comm, self.Nndescriptor,
self.nndescriptor)
#self.Nn2nn = Redistributor(self.block_comm, self.Nndescriptor,
# self.nndescriptor, 'L') #XXX faster but...
        # Resulting matrix will be used in dgemm which is symmetry oblivious
self.nn2nN = Redistributor(self.block_comm, self.nndescriptor,
self.nNdescriptor)
def diagonalize(self, H_nn, eps_n):
nbands = self.bd.nbands
eps_N = np.empty(nbands)
self.timer.start('Diagonalize')
self._diagonalize(H_nn, eps_N)
self.timer.stop('Diagonalize')
self.timer.start('Distribute results')
# eps_N is already on block_comm.rank = 0
# easier to broadcast eps_N to all and
# get the correct slice afterward.
self.block_comm.broadcast(eps_N, 0)
eps_n[:] = eps_N[self.bd.get_slice()]
self.timer.stop('Distribute results')
def _diagonalize(self, H_nn, eps_N):
"""Parallel diagonalizer."""
self.nndescriptor.diagonalize_dc(H_nn.copy(), H_nn, eps_N, 'L')
def inverse_cholesky(self, S_nn):
self.timer.start('Inverse Cholesky')
self._inverse_cholesky(S_nn)
self.block_comm.barrier() # removing barrier may lead to race condition
self.timer.stop('Inverse Cholesky')
def _inverse_cholesky(self, S_nn):
self.nndescriptor.inverse_cholesky(S_nn, 'L')
def get_description(self):
(title, template) = BlacsLayouts.get_description(self)
bg = self.blockgrid
desc = self.nndescriptor
s = template % (bg.nprow, bg.npcol, desc.mb, desc.nb)
return ' '.join([title, s])
class BlacsOrbitalLayouts(BlacsLayouts):
"""ScaLAPACK Dense Linear Algebra.
This class is instantiated in LCAO. Not for casual use, at least for now.
Requires two distributors and three descriptors for initialization
as well as grid descriptors and band descriptors. Distributors are
for cols2blocks (1D -> 2D BLACS grid) and blocks2cols (2D -> 1D
BLACS grid). ScaLAPACK operations must occur on 2D BLACS grid for
performance and scalability.
_general_diagonalize is "hard-coded" for LCAO.
Expects both Hamiltonian and Overlap matrix to be on the 2D BLACS grid.
This is done early on to save memory.
"""
# XXX rewrite this docstring a bit!
# This class 'describes' all the LCAO Blacs-related layouts
def __init__(self, gd, bd, dtype, mcpus, ncpus, blocksize, nao,
timer=nulltimer):
BlacsLayouts.__init__(self, gd, bd, dtype, mcpus, ncpus, blocksize,
timer)
nbands = bd.nbands
mynbands = bd.mynbands
self.orbital_comm = self.bd.comm
naoblocksize = -((-nao) // self.orbital_comm.size)
self.nao = nao
# Range of basis functions for BLACS distribution of matrices:
self.Mmax = nao
self.Mstart = bd.comm.rank * naoblocksize
self.Mstop = min(self.Mstart + naoblocksize, self.Mmax)
self.mynao = self.Mstop - self.Mstart
# Column layout for one matrix per band rank:
self.columngrid = BlacsGrid(bd.comm, bd.comm.size, 1)
self.mMdescriptor = self.columngrid.new_descriptor(nao, nao,
naoblocksize, nao)
self.nMdescriptor = self.columngrid.new_descriptor(nbands, nao,
mynbands, nao)
#parallelprint(world, (mynao, self.mMdescriptor.shape))
# Column layout for one matrix in total (only on grid masters):
self.single_column_grid = BlacsGrid(self.column_comm, bd.comm.size, 1)
self.mM_unique_descriptor = self.single_column_grid.new_descriptor( \
nao, nao, naoblocksize, nao)
# nM_unique_descriptor is meant to hold the coefficients after
# diagonalization. BLACS requires it to be nao-by-nao, but
# we only fill meaningful data into the first nbands columns.
#
# The array will then be trimmed and broadcast across
# the grid descriptor's communicator.
self.nM_unique_descriptor = self.single_column_grid.new_descriptor( \
nbands, nao, mynbands, nao)
# Fully blocked grid for diagonalization with many CPUs:
self.mmdescriptor = self.blockgrid.new_descriptor(nao, nao, blocksize,
blocksize)
#self.nMdescriptor = nMdescriptor
self.mM2mm = Redistributor(self.block_comm, self.mM_unique_descriptor,
self.mmdescriptor)
self.mm2nM = Redistributor(self.block_comm, self.mmdescriptor,
self.nM_unique_descriptor)
def diagonalize(self, H_mm, C_nM, eps_n, S_mm):
# C_nM needs to be simultaneously compatible with:
# 1. outdescriptor
# 2. broadcast with gd.comm
        # We will do this with a dummy buffer C2_nM
indescriptor = self.mM2mm.srcdescriptor # cols2blocks
outdescriptor = self.mm2nM.dstdescriptor # blocks2cols
blockdescriptor = self.mM2mm.dstdescriptor # cols2blocks
dtype = S_mm.dtype
eps_M = np.empty(C_nM.shape[-1]) # empty helps us debug
subM, subN = outdescriptor.gshape
C_mm = blockdescriptor.zeros(dtype=dtype)
self.timer.start('General diagonalize')
# general_diagonalize_ex may have a buffer overflow, so
# we no longer use it
#blockdescriptor.general_diagonalize_ex(H_mm, S_mm.copy(), C_mm, eps_M,
# UL='L', iu=self.bd.nbands)
blockdescriptor.general_diagonalize_dc(H_mm, S_mm.copy(), C_mm, eps_M,
UL='L')
self.timer.stop('General diagonalize')
# Make C_nM compatible with the redistributor
self.timer.start('Redistribute coefs')
if outdescriptor:
C2_nM = C_nM
else:
C2_nM = outdescriptor.empty(dtype=dtype)
assert outdescriptor.check(C2_nM)
self.mm2nM.redistribute(C_mm, C2_nM, subM, subN) # blocks2cols
self.timer.stop('Redistribute coefs')
self.timer.start('Send coefs to domains')
# eps_M is already on block_comm.rank = 0
# easier to broadcast eps_M to all and
# get the correct slice afterward.
self.block_comm.broadcast(eps_M, 0)
eps_n[:] = eps_M[self.bd.get_slice()]
self.gd.comm.broadcast(C_nM, 0)
self.timer.stop('Send coefs to domains')
def distribute_overlap_matrix(self, S_qmM, root=0,
add_hermitian_conjugate=False):
# Some MPI implementations need a lot of memory to do large
# reductions. To avoid trouble, we do comm.sum on smaller blocks
# of S (this code is also safe for arrays smaller than blocksize)
Sflat_x = S_qmM.ravel()
blocksize = 2**23 // Sflat_x.itemsize # 8 MiB
nblocks = -(-len(Sflat_x) // blocksize)
Mstart = 0
for i in range(nblocks):
self.gd.comm.sum(Sflat_x[Mstart:Mstart + blocksize], root=root)
Mstart += blocksize
assert Mstart + blocksize >= len(Sflat_x)
xshape = S_qmM.shape[:-2]
nm, nM = S_qmM.shape[-2:]
S_qmM = S_qmM.reshape(-1, nm, nM)
blockdesc = self.mmdescriptor
coldesc = self.mM_unique_descriptor
S_qmm = blockdesc.zeros(len(S_qmM), S_qmM.dtype)
if not coldesc: # XXX ugly way to sort out inactive ranks
S_qmM = coldesc.zeros(len(S_qmM), S_qmM.dtype)
self.timer.start('Distribute overlap matrix')
for S_mM, S_mm in zip(S_qmM, S_qmm):
self.mM2mm.redistribute(S_mM, S_mm)
if add_hermitian_conjugate:
if blockdesc.active:
pblas_tran(1.0, S_mm.copy(), 1.0, S_mm,
blockdesc, blockdesc)
self.timer.stop('Distribute overlap matrix')
return S_qmm.reshape(xshape + blockdesc.shape)
def get_overlap_matrix_shape(self):
return self.mmdescriptor.shape
def calculate_blocked_density_matrix(self, f_n, C_nM):
nbands = self.bd.nbands
mynbands = self.bd.mynbands
nao = self.nao
dtype = C_nM.dtype
self.nMdescriptor.checkassert(C_nM)
if self.gd.rank == 0:
Cf_nM = (C_nM * f_n[:, None]).conj()
else:
C_nM = self.nM_unique_descriptor.zeros(dtype=dtype)
Cf_nM = self.nM_unique_descriptor.zeros(dtype=dtype)
r = Redistributor(self.block_comm, self.nM_unique_descriptor,
self.mmdescriptor)
Cf_mm = self.mmdescriptor.zeros(dtype=dtype)
r.redistribute(Cf_nM, Cf_mm, nbands, nao)
del Cf_nM
C_mm = self.mmdescriptor.zeros(dtype=dtype)
r.redistribute(C_nM, C_mm, nbands, nao)
# no use to delete C_nM as it's in the input...
rho_mm = self.mmdescriptor.zeros(dtype=dtype)
pblas_simple_gemm(self.mmdescriptor,
self.mmdescriptor,
self.mmdescriptor,
Cf_mm, C_mm, rho_mm, transa='T')
return rho_mm
def calculate_density_matrix(self, f_n, C_nM, rho_mM=None):
"""Calculate density matrix from occupations and coefficients.
Presently this function performs the usual scalapack 3-step trick:
redistribute-numbercrunching-backdistribute.
Notes on future performance improvement.
As per the current framework, C_nM exists as copies on each
domain, i.e. this is not parallel over domains. We'd like to
correct this and have an efficient distribution using e.g. the
block communicator.
The diagonalization routine and other parts of the code should
however be changed to accommodate the following scheme:
Keep coefficients in C_mm form after the diagonalization.
rho_mm can then be directly calculated from C_mm without
redistribution, after which we only need to redistribute
rho_mm across domains.
"""
dtype = C_nM.dtype
rho_mm = self.calculate_blocked_density_matrix(f_n, C_nM)
rback = Redistributor(self.block_comm, self.mmdescriptor,
self.mM_unique_descriptor)
rho1_mM = self.mM_unique_descriptor.zeros(dtype=dtype)
rback.redistribute(rho_mm, rho1_mM)
del rho_mm
if rho_mM is None:
if self.gd.rank == 0:
rho_mM = rho1_mM
else:
rho_mM = self.mMdescriptor.zeros(dtype=dtype)
self.gd.comm.broadcast(rho_mM, 0)
return rho_mM
def distribute_to_columns(self, rho_mm, srcdescriptor):
redistributor = Redistributor(self.block_comm, # XXX
srcdescriptor,
self.mM_unique_descriptor)
rho_mM = redistributor.redistribute(rho_mm)
if self.gd.rank != 0:
rho_mM = self.mMdescriptor.zeros(dtype=rho_mm.dtype)
self.gd.comm.broadcast(rho_mM, 0)
return rho_mM
def oldcalculate_density_matrix(self, f_n, C_nM, rho_mM=None):
# This version is parallel over the band descriptor only.
# This is inefficient, but let's keep it for a while in case
# there's trouble with the more efficient version
nbands = self.bd.nbands
mynbands = self.bd.mynbands
nao = self.nao
if rho_mM is None:
rho_mM = self.mMdescriptor.zeros(dtype=C_nM.dtype)
Cf_nM = (C_nM * f_n[:, None]).conj()
pblas_simple_gemm(self.nMdescriptor, self.nMdescriptor,
self.mMdescriptor, Cf_nM, C_nM, rho_mM, transa='T')
return rho_mM
def get_transposed_density_matrix(self, f_n, C_nM, rho_mM=None):
return self.calculate_density_matrix(f_n, C_nM, rho_mM).conj()
def get_description(self):
(title, template) = BlacsLayouts.get_description(self)
bg = self.blockgrid
desc = self.mmdescriptor
s = template % (bg.nprow, bg.npcol, desc.mb, desc.nb)
return ' '.join([title, s])
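# --- Illustrative sketch (not part of GPAW) ---
# The density-matrix routines above all implement
# rho_MM = sum_n f_n C*_nM C_nM', i.e. occupation-weighted outer products of
# the LCAO coefficients.  Written with plain numpy instead of
# (Sca)LAPACK/BLACS calls:
def _lcao_density_matrix_demo():
    C_nM = np.array([[1.0, 0.0, 0.0],   # band 0
                     [0.0, 1.0, 0.0]])  # band 1
    f_n = np.array([2.0, 1.0])          # band occupations
    rho_MM = np.dot((C_nM * f_n[:, None]).conj().T, C_nM)
    return rho_MM                       # diag(2.0, 1.0, 0.0)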
class OrbitalLayouts(KohnShamLayouts):
def __init__(self, gd, bd, dtype, nao, timer=nulltimer):
KohnShamLayouts.__init__(self, gd, bd, dtype, timer)
self.mMdescriptor = MatrixDescriptor(nao, nao)
self.nMdescriptor = MatrixDescriptor(bd.mynbands, nao)
self.Mstart = 0
self.Mstop = nao
self.Mmax = nao
self.mynao = nao
self.nao = nao
self.orbital_comm = bd.comm
def diagonalize(self, H_MM, C_nM, eps_n, S_MM):
eps_M = np.empty(C_nM.shape[-1])
self.block_comm.broadcast(H_MM, 0)
self.block_comm.broadcast(S_MM, 0)
self._diagonalize(H_MM, S_MM.copy(), eps_M)
eps_n[:] = eps_M[self.bd.get_slice()]
C_nM[:] = H_MM[self.bd.get_slice()]
def _diagonalize(self, H_MM, S_MM, eps_M):
"""Serial diagonalize via LAPACK."""
# This is replicated computation but ultimately avoids
# additional communication
general_diagonalize(H_MM, eps_M, S_MM)
def estimate_memory(self, mem, dtype):
nao = self.setups.nao
itemsize = mem.itemsize[dtype]
mem.subnode('eps [M]', self.nao * mem.floatsize)
mem.subnode('H [MM]', self.nao * self.nao * itemsize)
def distribute_overlap_matrix(self, S_qMM, root=0,
add_hermitian_conjugate=False):
self.gd.comm.sum(S_qMM, root)
if add_hermitian_conjugate:
S_qMM += S_qMM.swapaxes(-1, -2).conj()
return S_qMM
def get_overlap_matrix_shape(self):
return self.nao, self.nao
def calculate_density_matrix(self, f_n, C_nM, rho_MM=None, C2_nM=None):
# Only a madman would use a non-transposed density matrix.
# Maybe we should use the get_transposed_density_matrix instead
if rho_MM is None:
rho_MM = np.zeros((self.mynao, self.nao), dtype=C_nM.dtype)
# XXX Should not conjugate, but call gemm(..., 'c')
# Although that requires knowing C_Mn and not C_nM.
# that also conforms better to the usual conventions in literature
if C2_nM is None:
C2_nM = C_nM
Cf_Mn = np.ascontiguousarray(C2_nM.T.conj() * f_n)
gemm(1.0, C_nM, Cf_Mn, 0.0, rho_MM, 'n')
return rho_MM
def get_transposed_density_matrix(self, f_n, C_nM, rho_MM=None):
return self.calculate_density_matrix(f_n, C_nM, rho_MM).T.copy()
#if rho_MM is None:
# rho_MM = np.zeros((self.mynao, self.nao), dtype=C_nM.dtype)
#C_Mn = C_nM.T.copy()
#gemm(1.0, C_Mn, f_n[np.newaxis, :] * C_Mn, 0.0, rho_MM, 'c')
#self.bd.comm.sum(rho_MM)
#return rho_MM
def alternative_calculate_density_matrix(self, f_n, C_nM, rho_MM=None):
if rho_MM is None:
rho_MM = np.zeros((self.mynao, self.nao), dtype=C_nM.dtype)
# Alternative suggestion. Might be faster. Someone should test this
C_Mn = C_nM.T.copy()
r2k(0.5, C_Mn, f_n * C_Mn, 0.0, rho_MM)
tri2full(rho_MM)
return rho_MM
def get_description(self):
return 'Serial LAPACK'
def calculate_density_matrix_delta(self, d_nn, C_nM, rho_MM=None):
# Only a madman would use a non-transposed density matrix.
# Maybe we should use the get_transposed_density_matrix instead
if rho_MM is None:
rho_MM = np.zeros((self.mynao, self.nao), dtype=C_nM.dtype)
Cd_Mn = np.zeros((self.nao, self.bd.mynbands), dtype=C_nM.dtype)
# XXX Should not conjugate, but call gemm(..., 'c')
# Although that requires knowing C_Mn and not C_nM.
# that also conforms better to the usual conventions in literature
C_Mn = C_nM.T.conj().copy()
gemm(1.0, d_nn, C_Mn, 0.0, Cd_Mn, 'n')
gemm(1.0, C_nM, Cd_Mn, 0.0, rho_MM, 'n')
self.bd.comm.sum(rho_MM)
return rho_MM
def get_transposed_density_matrix_delta(self, d_nn, C_nM, rho_MM=None):
return self.calculate_density_matrix_delta(d_nn, C_nM, rho_MM).T.copy()
|
ajylee/gpaw-rtxs
|
gpaw/kohnsham_layouts.py
|
Python
|
gpl-3.0
| 25,588
|
[
"GPAW"
] |
a1d36321222e10ae0c02321db1e2954750d4b926d7820d323ca36338d45db954
|
from __future__ import print_function, absolute_import
import modeller
from allosmod.modeller import _truncated_gaussian
class TruncatedGaussian(modeller.forms.RestraintForm):
"""AllosMod truncated Gaussian restraint.
This is implemented as a C extension (_truncated_gaussian.so)
to Modeller."""
_builtin_index = _truncated_gaussian.truncated_gaussian_create()
def __init__(self, group, feature, dele_max, slope, scl_delx, weights,
means, stdevs):
lv = -1
for var in (weights, means, stdevs):
if (lv >= 0 and lv != len(var)) \
or not isinstance(var, (tuple, list)):
raise TypeError("weights, means and stdevs should all be "
"sequences of the same length")
lv = len(var)
modeller.forms.RestraintForm.__init__(
self, group, feature, len(weights),
(dele_max, slope, scl_delx) + tuple(weights) + tuple(means)
+ tuple(stdevs))
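# --- Illustrative sketch (not part of AllosMod) ---
# TruncatedGaussian packs its parameters as three scalars followed by the
# concatenated weights, means and stdevs.  With two Gaussian components the
# flat parameter tuple therefore holds 3 + 3*2 = 9 values:
def _pack_parameters_demo():
    dele_max, slope, scl_delx = 10.0, 1.0, 0.5
    weights, means, stdevs = (0.7, 0.3), (1.5, 3.0), (0.1, 0.2)
    return (dele_max, slope, scl_delx) + tuple(weights) + tuple(means) \
        + tuple(stdevs)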
|
salilab/allosmod-lib
|
lib/allosmod/modeller/forms.py
|
Python
|
lgpl-2.1
| 1,018
|
[
"Gaussian"
] |
5e63a3f03229cb231dce831b6f1ba0a8af70e07a4377f95eca708f43ca189dba
|
import glob
import hashlib
import os
from subprocess import check_call
import h5py
import openmc
import pytest
from tests.testing_harness import TestHarness
from tests.regression_tests import config
vtk = pytest.importorskip('vtk')
class PlotVoxelTestHarness(TestHarness):
"""Specialized TestHarness for running OpenMC voxel plot tests."""
def __init__(self, plot_names):
super().__init__(None)
self._plot_names = plot_names
def _run_openmc(self):
openmc.plot_geometry(openmc_exec=config['exe'])
check_call(['../../../scripts/openmc-voxel-to-vtk'] +
glob.glob('plot_4.h5'))
def _test_output_created(self):
"""Make sure *.ppm has been created."""
for fname in self._plot_names:
assert os.path.exists(fname), 'Plot output file does not exist.'
def _cleanup(self):
super()._cleanup()
for fname in self._plot_names:
if os.path.exists(fname):
os.remove(fname)
def _get_results(self):
"""Return a string hash of the plot files."""
outstr = bytes()
for fname in self._plot_names:
if fname.endswith('.h5'):
# Add voxel data to results
with h5py.File(fname, 'r') as fh:
outstr += fh.attrs['filetype']
outstr += fh.attrs['num_voxels'].tostring()
outstr += fh.attrs['lower_left'].tostring()
outstr += fh.attrs['voxel_width'].tostring()
outstr += fh['data'].value.tostring()
# Hash the information and return.
sha512 = hashlib.sha512()
sha512.update(outstr)
outstr = sha512.hexdigest()
return outstr
def test_plot_voxel():
harness = PlotVoxelTestHarness(('plot_4.h5', 'plot.vti'))
harness.main()
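# --- Illustrative sketch (not part of the test) ---
# _get_results() reduces the voxel plot to a single sha512 digest so the
# harness can compare runs.  The same idea, shown on a small in-memory byte
# string instead of an HDF5 file:
def _hash_results_demo():
    payload = b'filetype=voxel' + b'\x00\x01\x02'
    sha512 = hashlib.sha512()
    sha512.update(payload)
    return sha512.hexdigest()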
|
liangjg/openmc
|
tests/regression_tests/plot_voxel/test.py
|
Python
|
mit
| 1,877
|
[
"VTK"
] |
ca4050e72c841d1eda1b6f6697cce0a5b09640336c9ea558bf11911fc0cbd580
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def g_connect(method):
''' wrapper to lazily initialize connection info to galaxy '''
def wrapped(self, *args, **kwargs):
if not self.initialized:
display.vvvv("Initial connection to galaxy_server: %s" % self._api_server)
server_version = self._get_server_api_version()
if server_version not in self.SUPPORTED_VERSIONS:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
self.version = server_version # for future use
display.vvvv("Base API: %s" % self.baseurl)
self.initialized = True
return method(self, *args, **kwargs)
return wrapped
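# --- Illustrative sketch (not part of Ansible) ---
# g_connect() above is a lazy-initialization decorator: the first call to any
# decorated method performs the one-time server handshake, later calls go
# straight through.  The same pattern reduced to its core:
def _lazy_init(method):
    def wrapped(self, *args, **kwargs):
        if not getattr(self, 'initialized', False):
            # a real implementation would open the connection here
            self.initialized = True
        return method(self, *args, **kwargs)
    return wrapped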
class GalaxyAPI(object):
    ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy):
self.galaxy = galaxy
self.token = GalaxyToken()
self._api_server = C.GALAXY_SERVER
self._validate_certs = not galaxy.options.ignore_certs
self.baseurl = None
self.version = None
self.initialized = False
display.debug('Validate TLS certificates: %s' % self._validate_certs)
# set the API server
if galaxy.options.api_server != C.GALAXY_SERVER:
self._api_server = galaxy.options.api_server
def __auth_header(self):
token = self.token.get()
if token is None:
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
return {'Authorization': 'Token ' + token}
@g_connect
def __call_galaxy(self, url, args=None, headers=None, method=None):
if args and not headers:
headers = self.__auth_header()
try:
display.vvv(url)
resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method,
timeout=20)
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
except HTTPError as e:
res = json.loads(to_text(e.fp.read(), errors='surrogate_or_strict'))
raise AnsibleError(res['detail'])
return data
@property
def api_server(self):
return self._api_server
@property
def validate_certs(self):
return self._validate_certs
def _get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
url = '%s/api/' % self._api_server
try:
return_data = open_url(url, validate_certs=self._validate_certs)
except Exception as e:
raise AnsibleError("Failed to get data from the API server (%s): %s " % (url, to_native(e)))
try:
data = json.loads(to_text(return_data.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Could not process data from the API server (%s): %s " % (url, to_native(e)))
if 'current_version' not in data:
raise AnsibleError("missing required 'current_version' from server response (%s)" % url)
return data['current_version']
@g_connect
def authenticate(self, github_token):
"""
Retrieve an authentication token
"""
url = '%s/tokens/' % self.baseurl
args = urlencode({"github_token": github_token})
resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@g_connect
def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
"""
Post an import request
"""
url = '%s/imports/' % self.baseurl
args = {
"github_user": github_user,
"github_repo": github_repo,
"github_reference": reference if reference else ""
}
if role_name:
args['alternate_role_name'] = role_name
elif github_repo.startswith('ansible-role'):
args['alternate_role_name'] = github_repo[len('ansible-role')+1:]
data = self.__call_galaxy(url, args=urlencode(args))
if data.get('results', None):
return data['results']
return data
@g_connect
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
"""
Check the status of an import task.
"""
url = '%s/imports/' % self.baseurl
if task_id is not None:
url = "%s?id=%d" % (url,task_id)
elif github_user is not None and github_repo is not None:
url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
else:
raise AnsibleError("Expected task_id or github_user and github_repo")
data = self.__call_galaxy(url)
return data['results']
@g_connect
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name.
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
data = self.__call_galaxy(url)
if len(data["results"]) != 0:
return data["results"][0]
return None
@g_connect
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
data = self.__call_galaxy(url)
results = data['results']
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except:
return None
@g_connect
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = self.__call_galaxy(url)
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next_link', None) is None)
while not done:
url = '%s%s' % (self._api_server, data['next_link'])
data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next_link', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
@g_connect
def search_roles(self, search, **kwargs):
search_url = self.baseurl + '/search/roles/?'
if search:
search_url += '&autocomplete=' + urlquote(search)
tags = kwargs.get('tags',None)
platforms = kwargs.get('platforms', None)
page_size = kwargs.get('page_size', None)
author = kwargs.get('author', None)
if tags and isinstance(tags, string_types):
tags = tags.split(',')
search_url += '&tags_autocomplete=' + '+'.join(tags)
if platforms and isinstance(platforms, string_types):
platforms = platforms.split(',')
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
if page_size:
search_url += '&page_size=%s' % page_size
if author:
search_url += '&username_autocomplete=%s' % author
data = self.__call_galaxy(search_url)
return data
@g_connect
def add_secret(self, source, github_user, github_repo, secret):
url = "%s/notification_secrets/" % self.baseurl
args = urlencode({
"source": source,
"github_user": github_user,
"github_repo": github_repo,
"secret": secret
})
data = self.__call_galaxy(url, args=args)
return data
@g_connect
def list_secrets(self):
url = "%s/notification_secrets" % self.baseurl
data = self.__call_galaxy(url, headers=self.__auth_header())
return data
@g_connect
def remove_secret(self, secret_id):
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
@g_connect
def delete_role(self, github_user, github_repo):
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
|
j00bar/ansible
|
lib/ansible/galaxy/api.py
|
Python
|
gpl-3.0
| 11,030
|
[
"Galaxy"
] |
ac84f6d36ad39338ac65067db0e24aaed48d70054d9c8bb1851c2e234f121ad3
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
from ctypes import c_char, c_int, sizeof, c_double
# Maybe even not necessary after all...
#from pyscf.lib import misc
#libopenmx = misc.load_library("libopenmx")
#libopenmx.openmx_unpack.argtypes = (c_int, c_char_p[])
#
#
#
def openmx_import_scfout(self, **kw):
""" Calls libopenmx to get the data and interpret it then """
import struct
from pyscf.nao.m_openmx_mat import openmx_mat_c
#label, cd
self.label = label = kw['openmx'] if 'openmx' in kw else 'openmx'
self.cd = cd = kw['cd'] if 'cd' in kw else '.'
fname = cd+'/'+label+'.scfout'
with open(fname, mode='rb') as f: # b is important -> binary
header = f.read(6*sizeof(c_int))
(natoms,SpinP_switch,Catomnum,Latomnum,Ratomnum,TCpyCell) = struct.unpack("@6i", header)
assert natoms>0
assert SpinP_switch>-1
#print(natoms,SpinP_switch,Catomnum,Latomnum,Ratomnum,TCpyCell)
atv = np.fromfile(f, count=(TCpyCell+1)*4).reshape((TCpyCell+1,4))
#print(atv)
atv_ijk = np.fromfile(f, count=(TCpyCell+1)*4, dtype=c_int).reshape((TCpyCell+1,4))
#print(atv_ijk)
Total_NumOrbs = np.ones(natoms+1, dtype=c_int)
Total_NumOrbs[1:] = np.fromfile(f, count=natoms, dtype=c_int)
#print('Total_NumOrbs')
#print(Total_NumOrbs)
FNAN = np.zeros(natoms+1, dtype=c_int)
FNAN[1:] = np.fromfile(f, count=natoms, dtype=c_int)
#print('FNAN')
#print(FNAN, max(FNAN))
natn = np.zeros((natoms+1,max(FNAN)+1), dtype=c_int)
ncn = np.zeros((natoms+1,max(FNAN)+1), dtype=c_int)
for iatom,count in enumerate(FNAN[1:]): natn[iatom+1,:] = np.fromfile(f, count=count+1, dtype=c_int)
for iatom,count in enumerate(FNAN[1:]): ncn[iatom+1,:] = np.fromfile(f, count=count+1, dtype=c_int)
#print('natn ')
#print(natn)
#print('ncn ')
#print(ncn)
tv = np.zeros((4,4))
for i in range(3): tv[i+1,:] = np.fromfile(f, count=4)
rtv = np.zeros((4,4))
for i in range(3): rtv[i+1,:] = np.fromfile(f, count=4)
#print('tv ', tv)
#print('rtv ', rtv)
Gxyz = np.fromfile(f, count=natoms*4).reshape((natoms,4))
#print('Gxyz ')
#print(Gxyz)
omm = openmx_mat_c(natoms, Total_NumOrbs, FNAN, natn)
Hks = np.zeros([SpinP_switch+1]+omm.get_dims())
for spin in range(SpinP_switch+1): omm.fromfile(f, out=Hks[spin])
if SpinP_switch==3:
iHks = np.zeros([3]+omm.get_dims())
for spin in range(3): omm.fromfile(f, out=iHks[spin])
OLP = omm.fromfile(f)
OLPx = omm.fromfile(f)
OLPy = omm.fromfile(f)
OLPz = omm.fromfile(f)
DM = np.zeros([SpinP_switch+1]+omm.get_dims())
for spin in range(SpinP_switch+1): omm.fromfile(f,out=DM[spin])
solver = struct.unpack("@i", f.read(1*sizeof(c_int)))[0]
dipole_moment_core = np.zeros(3)
dipole_moment_background = np.zeros(3)
ChemP,E_Temp,\
dipole_moment_core[0],dipole_moment_core[1],dipole_moment_core[2],\
dipole_moment_background[0],dipole_moment_background[1],dipole_moment_background[2],\
Valence_Electrons,Total_SpinS = struct.unpack("@10d", f.read(10*sizeof(c_double)))
#print(solver)
#print(ChemP,E_Temp)
#print(dipole_moment_core)
#print(dipole_moment_background)
#print(Valence_Electrons)
#print(Total_SpinS)
nlines_input = struct.unpack("@i", f.read(1*sizeof(c_int)))[0]
input_file = []
for line in range(nlines_input):
input_file.append(str(struct.unpack("@256s", f.read(256*sizeof(c_char)))[0]))
self.natm=self.natoms=natoms
self.nspin = SpinP_switch+1
self.ucell = tv[1:4,1:4]
self.atom2coord = Gxyz[:,0:3]
self.atom2s = np.zeros((self.natm+1), dtype=np.int)
for atom,norb in enumerate(Total_NumOrbs[1:]): self.atom2s[atom+1]=self.atom2s[atom]+norb
self.norbs = self.atom2s[-1]
self.nkpoints = 1
#print(self.atom2s)
#print(self.norbs)
#print(self.nspin)
return self
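# --- Illustrative sketch (not part of PySCF) ---
# The header parsing above reads six C ints straight out of the binary
# .scfout file with struct.unpack.  The same unpacking, shown on an
# in-memory buffer instead of an open file handle:
def _unpack_scfout_header_demo():
    import struct
    packed = struct.pack("@6i", 2, 0, 2, 0, 0, 26)
    natoms, SpinP_switch, Catomnum, Latomnum, Ratomnum, TCpyCell = \
        struct.unpack("@6i", packed)
    return natoms, SpinP_switch, TCpyCell  # (2, 0, 26)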
|
gkc1000/pyscf
|
pyscf/nao/m_openmx_import_scfout.py
|
Python
|
apache-2.0
| 4,547
|
[
"OpenMX",
"PySCF"
] |
22edb83662a04b38afac9340f05eab602864c8c9c6055f035880956ed5374fa5
|
# -*- coding: utf-8 -*-
"""Timesketch enhancer that exports Timesketch results."""
import time
from typing import List, Optional, TYPE_CHECKING
from dftimewolf.lib import module
from dftimewolf.lib import timesketch_utils
from dftimewolf.lib import utils
from dftimewolf.lib.containers import containers
from dftimewolf.lib.modules import manager as modules_manager
if TYPE_CHECKING:
from dftimewolf.lib import state as dftw_state
from timesketch_api_client import client
from timesketch_api_client import sketch as ts_sketch
from timesketch_api_client import story as ts_story
from timesketch_api_client import search as ts_search
from timesketch_api_client import aggregation as ts_aggregation
class TimesketchEnhancer(module.BaseModule):
"""Enhance Timesketch results with additional reports.
Attributes:
timesketch_api (TimesketchApiClient): Timesketch API client.
"""
# The name of a ticket attribute that contains the URL to a sketch.
_SKETCH_ATTRIBUTE_NAME = 'Timesketch URL'
# Number of seconds the exporter sleeps between checking analyzer status.
_ANALYZER_SECONDS_BETWEEN_CHECK = 3
# Maximum number of wait cycles before bailing out waiting for analyzer runs.
_ANALYZER_MAX_CHECKS = 60
_ANALYZERS_COMPLETE_SET = frozenset(['ERROR', 'DONE'])
# Name given to all report containers.
_REPORT_NAME = 'TimesketchEnhancer'
def __init__(self,
state: "dftw_state.DFTimewolfState",
name: Optional[str]=None,
critical: bool=False):
super(TimesketchEnhancer, self).__init__(
state, name=name, critical=critical)
self.timesketch_api: "client.TimesketchApi"
self._aggregations_to_skip = [] # type: List[str]
self._formatter: utils.FormatterInterface
self._include_stories = False
self._max_checks = self._ANALYZER_MAX_CHECKS
self._wait_for_analyzers = True
self._searches_to_skip = [] # type: List[str]
def SetUp(self, # pylint: disable=arguments-differ
wait_for_analyzers: bool=True,
searches_to_skip: str='',
aggregations_to_skip: str='',
include_stories: bool=False,
token_password: str='',
max_checks: int=0,
formatter: str='html') -> None:
"""Sets up a Timesketch Enhancer module.
Args:
wait_for_analyzers (bool): If set to True then the enhancer will wait
until all analyzers are done running. If set to False, the module
will be skipped, since it does not wait for any results. Defaults to
True.
searches_to_skip (str): A comma separated string with a list of names of
saved searches that are not to be included when generating reports.
aggregations_to_skip (str): A comma separated string with a list of
Aggregation names that are not to be included when generating
reports.
include_stories (bool): If set to True then story content will be
dumped into a report, otherwise stories will be ignored. Defaults
to False.
token_password (str): optional password used to decrypt the
Timesketch credential storage. Defaults to an empty string since
the upstream library expects a string value. An empty string means
a password will be generated by the upstream library.
max_checks (int): The enhancer will wait for analyzers to complete before
attempting to collect data from Timesketch. The tool waits 3 seconds
before each check, and by default the number of checks is 60, meaning
that the module will wait at most 180 seconds before continuing. This
may not be enough time to complete all the work needed, if more time
is needed max_checks can be increased.
formatter (str): optional string defining the formatting class that will
be used for text formatting in reports. Valid options are:
"html" or "markdown", defaults to "html".
"""
self.timesketch_api = timesketch_utils.GetApiClient(
self.state, token_password=token_password)
    if not self.timesketch_api or not self.timesketch_api.session:
self.ModuleError(
'Unable to get a Timesketch API client, try deleting the files '
'~/.timesketchrc and ~/.timesketch.token', critical=True)
if max_checks:
self._max_checks = int(max_checks)
self._include_stories = include_stories
self._wait_for_analyzers = wait_for_analyzers
if aggregations_to_skip:
self._aggregations_to_skip = [
x.strip() for x in aggregations_to_skip.split(',')]
if searches_to_skip:
self._searches_to_skip = [x.strip() for x in searches_to_skip.split(',')]
if formatter.lower() == 'markdown':
self._formatter = utils.MarkdownFormatter()
else:
self._formatter = utils.HTMLFormatter()
def _GetSketchURL(self, sketch: "ts_sketch.Sketch") -> str:
"""Returns a URL to access a sketch."""
api_root = sketch.api.api_root
ts_url, _, _ = api_root.partition('/api/v1')
return '{0:s}/sketch/{1:d}/'.format(ts_url, sketch.id)
def _GenerateAggregationString(
self, aggregations: List["ts_aggregation.Aggregation"]) -> str:
"""Returns a string with aggregation data.
The function runs through all saved aggregations in a sketch
and returns back a formatted string (using the formatter)
with the results of the run.
Args:
aggregations (list): a list of aggregation objects (Aggregation).
Returns:
str: A formatted string with the results of aggregation runs
on the sketch.
"""
aggregation_strings = []
for aggregation in aggregations:
if aggregation.name in self._aggregations_to_skip:
continue
data_frame = aggregation.table
if data_frame.empty:
continue
aggregation_strings.append(self._formatter.IndentText(
'{0:s}: {1:s}'.format(aggregation.name, aggregation.description),
level=2))
return '\n'.join(aggregation_strings)
def _ProcessAggregations(
self, aggregations: List["ts_aggregation.Aggregation"]) -> None:
"""Extract and store dataframes from aggregations as containers.
The function runs through all saved aggregations in a sketch
and extracts DataFrames from them. The data frames are stored
as containers, so that other modules can make use of them.
Args:
aggregations (list): a list of aggregation objects (Aggregation).
"""
for aggregation in aggregations:
if aggregation.name in self._aggregations_to_skip:
continue
data_frame = aggregation.table
if data_frame.empty:
continue
data_frame.drop(['bucket_name'], axis=1, inplace=True)
columns = list(data_frame.columns)
# We are presenting aggregations here, which is a table that consists of
# a column name and then the count. For easier reading in reports we want
# the count to be the last column displayed. In case the aggregation
# does not have a column named "count" a ValueError will get raised,
# in those cases we don't want to modify the data frame.
try:
count_index = columns.index('count')
count = columns.pop(count_index)
columns.append(count)
except ValueError:
pass
self.state.StoreContainer(containers.DataFrame(
data_frame=data_frame[columns],
description='Timesketch Aggregation: {0:s}'.format(
aggregation.name), name=self._REPORT_NAME))
def _GenerateStoryString(
self, stories: List["ts_story.Story"], sketch_url: str) -> str:
"""Returns a string with story data.
The function runs through all saved stories in a sketch and returns
back a formatted string with an overview of all stored stories.
Args:
stories (list): a list of Story objects (timesketch_api.story.Story).
sketch_url (str): the full URL to the sketch.
Returns:
str: A formatted string with the results of all stories stored in
the sketch.
"""
story_strings = []
for story in stories:
story_url = '{0:s}story/{1:d}'.format(sketch_url, story.id)
story_strings.append(self._formatter.IndentText(
self._formatter.Link(url=story_url, text=story.title), level=2))
return '\n'.join(story_strings)
def _ProcessStories(self, stories: List["ts_story.Story"]) -> None:
"""Extracts story content from a list of stories and saves as a report.
The function runs through all saved stories in a sketch and adds a
formatted version of the story as a report container.
Args:
stories (list): a list of Story objects (timesketch_api.story.Story).
"""
for story in stories:
if self._formatter.FORMAT == 'html':
story_string = story.to_html()
elif self._formatter.FORMAT == 'markdown':
story_string = story.to_markdown()
else:
story_string = story.to_export_format(self._formatter.FORMAT)
self.state.StoreContainer(containers.Report(
module_name='TimesketchEnhancer',
text_format=self._formatter.FORMAT,
text=story_string))
def _GenerateSavedSearchString(
self, saved_searches: List["ts_search.Search"], sketch_url: str) -> str:
"""Returns a string with saved search data.
The function runs through all saved searches in a sketch and returns
back a formatted string with the results of the run.
Args:
saved_searches (list): a list of Search objects
(timesketch_api.search.Search).
sketch_url (str): the full URL to the sketch.
Returns:
str: A formatted string with the results of the saved searches in
the sketch.
"""
search_strings = []
for saved_search in saved_searches:
if saved_search.name in self._searches_to_skip:
continue
# We only want to include automatically generated saved searches
# from analyzers.
if saved_search.user != 'System':
continue
search_url = '{0:s}explore?view={1:d}'.format(
sketch_url, saved_search.id)
search_strings.append(self._formatter.IndentText(
self._formatter.Link(url=search_url, text=saved_search.name),
level=2))
return '\n'.join(search_strings)
def _ProcessSavedSearches(
self, saved_searches: List["ts_search.Search"]) -> None:
"""Extract events from saved searches and store results as a container.
The function runs through all saved searches in a sketch and queries
the datastore for all events that match it and the results as a
dataframe container to the state object.
Args:
saved_searches (list): a list of Search
objects (timesketch_api.search.Search).
"""
for saved_search in saved_searches:
if saved_search.name in self._searches_to_skip:
continue
# We only want to include automatically generated searches from
# analyzers.
if saved_search.user != 'System':
continue
data_frame = saved_search.table
if data_frame.empty:
continue
# Clean up the data frame, remove Timesketch specific columns.
ts_columns = [x for x in data_frame.columns if x.startswith('_')]
      # Remove all columns from the data frame that appear in the ts_columns list.
data_frame.drop(ts_columns, axis=1, inplace=True)
columns = list(data_frame.columns)
# Move the datetime column to the first column displayed.
try:
index = columns.index('datetime')
datetime = columns.pop(index)
columns.insert(0, datetime)
except ValueError:
pass
self.state.StoreContainer(
containers.DataFrame(
data_frame=data_frame[columns],
name=self._REPORT_NAME,
description='Timesketch Saved Search: {0:s} - {1:s}'.format(
saved_search.name, saved_search.description)))
def _WaitForAnalyzers(self, sketch: "ts_sketch.Sketch") -> None:
"""Wait for all analyzers to complete their run.
Args:
sketch (timesketch_api.sketch.Sketch): the sketch object.
"""
check_number = 0
while True:
if check_number >= self._max_checks:
self.logger.warning(
'Exceeded maximum checks, not waiting any longer for analyzers '
'to complete.')
break
status_set = set()
      # get_analyzer_status returns a dict with information about the run
      # of all analyzers in a given sketch. One piece of that information is
      # the current status of each analyzer run; _ANALYZERS_COMPLETE_SET
      # contains the status values of analyzers that have completed their work.
for result in sketch.get_analyzer_status():
status_set.add(result.get('status', 'N/A'))
if status_set.issubset(self._ANALYZERS_COMPLETE_SET):
break
check_number += 1
time.sleep(self._ANALYZER_SECONDS_BETWEEN_CHECK)
def Process(self) -> None:
"""Executes a Timesketch enhancer module."""
if not self._wait_for_analyzers:
self.logger.warning(
'Not waiting for analyzers to run, skipping enhancer.')
return
if not self.timesketch_api:
message = 'Could not connect to Timesketch server'
self.ModuleError(message, critical=True)
sketch = self.state.GetFromCache('timesketch_sketch')
if not sketch:
message = (
'Sketch not found in cache, maybe the previous module was unable '
'to connect to Timesketch or unable to connect to and/or create '
'a sketch.')
self.ModuleError(message, critical=True)
self.logger.info('Waiting for analyzers to complete their run.')
summary_lines = [self._formatter.Heading('Timesketch Run', level=1)]
summary_lines.append(self._formatter.Paragraph(
'This is a summary of actions taken by Timesketch '
'during its run.'))
summary_lines.append(self._formatter.Paragraph(
'To visit the sketch, click {0:s}'.format(self._formatter.Link(
url=self._GetSketchURL(sketch), text='here'))))
summary_lines.append(self._formatter.Paragraph(
'Here is an overview of actions taken:'))
self._WaitForAnalyzers(sketch)
# Force a refresh of sketch data.
_ = sketch.lazyload_data(refresh_cache=True)
summary_lines.append(self._formatter.IndentStart())
saved_searches = sketch.list_saved_searches()
self._ProcessSavedSearches(saved_searches)
sketch_url = self._GetSketchURL(sketch)
search_string = self._GenerateSavedSearchString(
saved_searches, sketch_url)
formatted_string = ''
if search_string:
formatted_string = self._formatter.IndentText(
'The following saved searches were discovered:\n'
'{0:s}{1:s}{2:s}'.format(
self._formatter.IndentStart(),
search_string,
self._formatter.IndentEnd()))
else:
formatted_string = self._formatter.IndentText(
'Analyzers didn\'t save any searches.')
summary_lines.append(formatted_string)
aggregations = sketch.list_aggregations(
exclude_labels=['informational'])
self._ProcessAggregations(aggregations)
aggregation_string = self._GenerateAggregationString(aggregations)
if aggregation_string:
formatted_string = self._formatter.IndentText(
'The following aggregations were discovered:'
'\n{0:s}{1:s}{2:s}'.format(
self._formatter.IndentStart(),
aggregation_string,
self._formatter.IndentEnd()))
else:
formatted_string = self._formatter.IndentText(
'No aggregations were generated by analyzers.')
summary_lines.append(formatted_string)
stories = sketch.list_stories()
if self._include_stories:
self._ProcessStories(stories)
story_string = self._GenerateStoryString(stories, sketch_url)
if story_string:
formatted_string = self._formatter.IndentText(
'The following stories were generated:\n{0:s}{1:s}{2:s}'.format(
self._formatter.IndentStart(),
story_string,
self._formatter.IndentEnd()))
else:
formatted_string = self._formatter.IndentText(
'No stories were generated by analyzers.')
summary_lines.append(formatted_string)
summary_lines.append(self._formatter.IndentEnd())
analyzer_results = sketch.get_analyzer_status(as_sessions=True)
if analyzer_results:
line_string = self._formatter.Line()
summary_lines.append(line_string)
paragraph = self._formatter.Paragraph(
'Information from analyzer run:')
summary_lines.append(paragraph)
indent = self._formatter.IndentStart()
summary_lines.append(indent)
completed_ids = set()
for result in analyzer_results:
if result.id in completed_ids:
continue
if result.log:
log_text = self._formatter.IndentText(
'Logs: {0:s}'.format(result.log), level=2)
else:
log_text = ''
formatted_string = self._formatter.IndentText(
'ID: {0:d}\n{1:s}{2:s}\n{3:s}{4:s}'.format(
result.id,
self._formatter.IndentStart(),
'\n'.join([self._formatter.IndentText(
x.strip(), level=2) for x in result.results.split('\n')]),
log_text,
self._formatter.IndentEnd()
)
)
summary_lines.append(formatted_string)
completed_ids.add(result.id)
summary_lines.append(self._formatter.IndentEnd())
report_attributes = [{'update_comment': True}]
self.state.StoreContainer(containers.Report(
module_name='TimesketchEnhancer', text_format=self._formatter.FORMAT,
text='\n'.join(summary_lines), attributes=report_attributes))
self.logger.info('Analyzer reports generated')
modules_manager.ModulesManager.RegisterModule(TimesketchEnhancer)
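# --- Illustrative sketch (not part of dftimewolf) ---
# _ProcessAggregations() and _ProcessSavedSearches() reorder DataFrame columns
# before storing them ('count' moved last, 'datetime' moved first).  The same
# column shuffling on a toy frame; pandas is assumed to be available here,
# since the DataFrame containers above already carry pandas objects.
def _reorder_columns_demo():
    import pandas as pd
    frame = pd.DataFrame({'count': [3, 1], 'tag': ['a', 'b']})
    columns = list(frame.columns)
    columns.append(columns.pop(columns.index('count')))  # move 'count' last
    return frame[columns]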
|
log2timeline/dftimewolf
|
dftimewolf/lib/enhancers/timesketch.py
|
Python
|
apache-2.0
| 18,046
|
[
"VisIt"
] |
a5f968240c29a30950521379b98afe80037b471d639ace0914d844c3a2591a38
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script to get the multiplicity of the separation facets for some model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
if __name__ == '__main__':
allcg = AllCoordinationGeometries()
cg_symbol = 'I:12'
all_plane_points = []
cg = allcg[cg_symbol]
# I:12
if cg_symbol == 'I:12':
opposite_points = {0: 3,
1: 2,
2: 1,
3: 0,
4: 7,
5: 6,
6: 5,
7: 4,
8: 11,
9: 10,
10: 9,
11: 8
}
edges = cg._edges
for edge in edges:
opposite_edge = [opposite_points[edge[0]], opposite_points[edge[1]]]
equiv_plane = list(edge)
equiv_plane.extend(opposite_edge)
equiv_plane.sort()
equiv_plane = tuple(equiv_plane)
all_plane_points.append(equiv_plane)
all_plane_points = list(set(all_plane_points))
all_plane_points = [list(equiv_plane) for equiv_plane in all_plane_points]
print('All plane points ({:d}) for {} : '.format(len(all_plane_points), cg_symbol))
print(all_plane_points)
|
gVallverdu/pymatgen
|
dev_scripts/chemenv/plane_multiplicity.py
|
Python
|
mit
| 1,764
|
[
"pymatgen"
] |
c37a962038812a1bf9ceb589fa8cae9bc9fe555e4d6fcf33a7d63763daf8303f
|
#!/usr/bin/env python
import sys
import os
import re
import glob
import xmltodict
import json
import yaml
import copy
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from elasticsearch1 import Elasticsearch
from collections import OrderedDict
import datetime
import dateutil.parser
from itertools import izip
from distutils.version import LooseVersion
import csv
import shutil
from operator import itemgetter
es_queries = [
    # query 0: PCAWGDATA-45_Sanger GNOS entries whose study field ends with _test
{
"name": "sanger_vcf_with_study_field_ends_with_test",
"content":
{
"fields":[
"donor_unique_id"
],
"query":{
"wildcard":{
"variant_calling_results.sanger_variant_calling.study": "*_test"
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
}
]
# "must_not": [
# {
# "terms": {
# "flags.is_manual_qc_failed": [
# "T"
# ]
# }
# }
# ]
}
},
"size": 10000
}
},
# query 1: PCAWGDATA-47_donors with mismatch lane count
{
"name": "specimens_with_mismatch_lane_count",
"content":
{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must": [
{
"type":{
"value":"donor"
}
}
],
"should": [
{
"terms":{
"normal_alignment_status.do_lane_count_and_bam_count_match":[
"F"
]
}
},
{
"terms":{
"normal_alignment_status.do_lane_counts_in_every_bam_entry_match":[
"F"
]
}
},
{
"nested": {
"path": "tumor_alignment_status",
"filter":{
"bool": {
"should": [
{
"terms":{
"tumor_alignment_status.do_lane_count_and_bam_count_match":[
"F"
]
}
},
{
"terms":{
"tumor_alignment_status.do_lane_counts_in_every_bam_entry_match":[
"F"
]
}
}
]
}
}
}
}
],
"must_not": [
{
"terms": {
"gnos_repos_with_alignment_result":[
"https://cghub.ucsc.edu/"
]
}
}
]
}
},
"size": 10000
}
},
# query 2: variant calling missing input
{
"name": "variant_callings_missing_input",
"content":{
"fields":[
"donor_unique_id"
],
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms":{
"flags.is_bam_used_by_variant_calling_missing":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 3: get donors with mismatch duplicate bwa bams
{
"name": "donors_with_mismatch_duplicate_bwa_bams",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"duplicated_bwa_alignment_summary.exists_mismatch_bwa_bams":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 4: get missing gnos_entry from santa_cruz_freeze
{
"name": "missing_gnos_entry_from_santa_cruz_freeze",
"content":{
"fields": ["donor_unique_id"],
"filter":{
"bool": {
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_santa_cruz_donor":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 5: get specimens with mismatch effective xml md5sum
{
"name": "specimens_with_mismatch_effective_xml_md5sum",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.exists_xml_md5sum_mismatch":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 6: get donors with broad_incomplete_uploads
{
"name": "broad_incomplete_uploads",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_broad_variant_calling_performed":[
"F"
]
}
},
{
"bool": {
"should":[
{
"terms": {
"flags.broad.broad_file_subset_exist": [
"T"
]
}
},
{
"terms": {
"flags.broad.muse_file_subset_exist": [
"T"
]
}
},
{
"terms": {
"flags.broad.broad_tar_file_subset_exist": [
"T"
]
}
},
{
"terms": {
"flags.broad.exist_file_subsets_mismatch": [
"T"
]
}
}
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 7: get donors with broad_successful_uploads
{
"name": "broad_successful_uploads",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_broad_variant_calling_performed":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 8: get donors exist_specimen_type_mismatch
{
"name": "aliquots_with_mismatch_specimen_type",
"content":{
"fields":["donor_unique_id"],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
}
],
"should": [
{
"terms":{
"normal_alignment_status.exist_specimen_type_mismatch":[
"T"
]
}
},
{
"nested": {
"path": "tumor_alignment_status",
"filter":{
"bool": {
"should": [
{
"terms":{
"tumor_alignment_status.exist_specimen_type_mismatch":[
"T"
]
}
}
]
}
}
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 9: get donors exist vcf file prefix mismatch
{
"name": "donors_exist_vcf_file_prefix_mismatch",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.exists_vcf_file_prefix_mismatch":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 10: get missing gnos_entry from aug2015_release
{
"name": "missing_gnos_entry_from_aug2015_release",
"content":{
"fields": ["donor_unique_id"],
"filter":{
"bool": {
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_aug2015_donor":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
#query 11: get donors having fixed or version higher than 1.0.4 DKFZ/EMBL
{
"name": "dkfz_embl_single_gnos_uploads",
"content":{
"fields":["donor_unique_id"],
"filter":{
"bool":{
"must":[
{
"term":{
"flags.is_dkfz_embl_variant_calling_performed":[
"T"
]
}
},
{
"term":{
"flags.is_dkfz_variant_calling_performed":[
"F"
]
}
},
{
"term":{
"flags.is_embl_variant_calling_performed":[
"F"
]
}
}
],
"must_not": [
{
"term": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 12: ESAD-UK_GNOS_entries
{
"name": "ESAD-UK_unaligned_gnos_entries",
"content": {
"fields": "donor_unique_id",
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"dcc_project_code": [
"ESAD-UK"
]
}
},
{
"terms": {
"flags.is_sanger_variant_calling_performed": [
"F"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 13: get missing gnos_entry from oct2015_release
{
"name": "missing_gnos_entry_from_oct2015_release",
"content":{
"fields": ["donor_unique_id"],
"filter":{
"bool": {
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_oct2015_donor":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 14: get sanger uploads
{
"name": "sanger_uploads_svfixed_status",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_sanger_variant_calling_performed":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 15: get broad uploads
{
"name": "broad_uploads_snvfixed_status",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_broad_variant_calling_performed":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 16: get missing gnos_entry from mar2016_release
{
"name": "missing_gnos_entry_from_mar2016_release",
"content":{
"fields": ["donor_unique_id"],
"filter":{
"bool": {
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_mar2016_donor":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 17: get oxog uploads
{
"name": "variant_call_entries_with_broad_oxog_filter_applied",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_oxog_variant_calling_performed":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 18: get minibam uploads
{
"name": "minibams_extracted_from_variant_regions",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_minibam_variant_calling_performed":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 19: get gender information of pcawg donors
{
"name": "pcawg_donors_gender_info",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_normal_specimen_aligned":[
"T"
]
}
},
{
"terms":{
"flags.are_all_tumor_specimens_aligned":[
"T"
]
}
},
# {
# "terms":{
# "flags.is_mar2016_donor":[
# "T"
# ]
# }
# }
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 20: get missing gnos_entry from may2016_release
{
"name": "missing_gnos_entry_from_may2016_release",
"content":{
"fields": ["donor_unique_id"],
"filter":{
"bool": {
"must":[
{
"type":{
"value":"donor"
}
},
{
"terms":{
"flags.is_may2016_donor":[
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
# query 21: get all RNA-Seq data information
{
"name": "rna-seq_summary",
"content":{
"fields":[
"donor_unique_id"
],
"filter":{
"bool":{
"must":[
{
"type":{
"value":"donor"
}
},
{
"bool": {
"should":[
{
"terms": {
"flags.is_normal_star_rna_seq_alignment_performed": [
"T"
]
}
},
{
"terms": {
"flags.is_normal_tophat_rna_seq_alignment_performed": [
"T"
]
}
},
{
"terms": {
"flags.is_tumor_star_rna_seq_alignment_performed": [
"T"
]
}
},
{
"terms": {
"flags.is_tumor_tophat_rna_seq_alignment_performed": [
"T"
]
}
}
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
}
]
}
},
"size": 10000
}
},
]
def get_donor_json(es, es_index, donor_unique_id):
es_query_donor = {
"query": {
"term": {
"donor_unique_id": donor_unique_id
}
}
}
response = es.search(index=es_index, body=es_query_donor)
es_json = response['hits']['hits'][0]['_source']
return es_json
def get_donors_list(es, es_index, es_queries, q_index):
response = es.search(index=es_index, body=es_queries[q_index].get('content'))
donors_list = []
for p in response['hits']['hits']:
donors_list.append(p.get('fields').get('donor_unique_id')[0])
return donors_list
def create_report_info(donor_unique_id, es_json, q_index, annotations):
report_info_list = []
report_info = OrderedDict()
report_info['donor_unique_id'] = donor_unique_id
report_info['submitter_donor_id'] = es_json['submitter_donor_id']
report_info['dcc_project_code'] = es_json['dcc_project_code']
# annotations = {}
if q_index == 0:
add_report_info_0(report_info, report_info_list, es_json)
if q_index == 1:
add_report_info_1(report_info, report_info_list, es_json)
if q_index == 2:
add_report_info_2(report_info, report_info_list, es_json)
if q_index == 3:
report_info['is_train2_donor'] = es_json.get('flags').get('is_train2_donor')
report_info['is_sanger_variant_calling_performed'] = es_json.get('flags').get('is_sanger_variant_calling_performed')
add_report_info_3(report_info, report_info_list, es_json)
if q_index == 4:
flag = 'is_santa_cruz_entry'
add_report_info_4_10(report_info, report_info_list, es_json, flag)
if q_index == 10:
flag = 'is_aug2015_entry'
add_report_info_4_10(report_info, report_info_list, es_json, flag)
if q_index == 13:
flag = 'is_oct2015_entry'
add_report_info_4_10(report_info, report_info_list, es_json, flag)
if q_index == 16:
flag = 'is_mar2016_entry'
add_report_info_4_10(report_info, report_info_list, es_json, flag)
if q_index == 20:
flag = 'is_may2016_entry'
add_report_info_4_10(report_info, report_info_list, es_json, flag)
if q_index == 5:
add_report_info_5(report_info, report_info_list, es_json)
if q_index in [6, 7]:
add_report_info_6_7(report_info, report_info_list, es_json)
if q_index == 8:
add_report_info_8(report_info, report_info_list, es_json)
if q_index == 9:
add_report_info_9(report_info, report_info_list, es_json)
if q_index == 11:
add_report_info_14_15(report_info, report_info_list, es_json, 'dkfz_embl')
if q_index == 12:
# annotations = read_annotations(annotations, 'esad-uk_reheader_uuid', 'esad-uk_uuids.txt')
add_report_info_12(report_info, report_info_list, es_json, annotations)
if q_index == 14:
add_report_info_14_15(report_info, report_info_list, es_json, 'sanger')
if q_index == 15:
add_report_info_14_15(report_info, report_info_list, es_json, 'broad')
if q_index == 17:
add_report_info_14_15(report_info, report_info_list, es_json, 'oxog')
if q_index == 18:
add_report_info_14_15(report_info, report_info_list, es_json, 'minibam')
if q_index == 19:
# annotations = read_annotations(annotations, 'gender', '../pcawg-ega-submission/annotation/donor.all_projects.release20.tsv')
# annotations = read_annotations(annotations, 'gender_update', '../pcawg-ega-submission/annotation/donor.gender_update.release21.tsv')
add_report_info_19(report_info, report_info_list, es_json, annotations)
if q_index == 21:
add_report_info_21(report_info, report_info_list, es_json)
return report_info_list
def get_mapping(source):
target = {
"XX": "female",
"XY": "male",
"female": "female",
"male": "male"
}
return target.get(source)
def gen_dict_extract(key, var):
if hasattr(var,'iteritems'):
for k, v in var.iteritems():
if k.startswith(key) and not isinstance(v, dict):
yield v
if isinstance(v, dict):
for result in gen_dict_extract(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in gen_dict_extract(key, d):
yield result
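# Illustrative sketch, not part of the original script: gen_dict_extract walks an
# arbitrarily nested structure of dicts and lists and yields every non-dict value
# whose key starts with the given prefix. With a hypothetical document such as
#
#   doc = {'workflow_details': {'variant_qc_metrics': {'gender': 'XX'}},
#          'inputs': [{'gender_inferred': 'female'}]}
#
# list(gen_dict_extract('gender', doc)) would yield ['XX', 'female']
# (the exact order depends on Python 2 dict iteration order).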
def add_report_info_21(report_info, report_info_list, es_json):
rna_seq_info = es_json.get('rna_seq').get('alignment')
report_info['library_strategy'] = 'RNA-Seq'
for specimen_type in rna_seq_info.keys():
if not rna_seq_info.get(specimen_type): # the specimen_type has no alignment result
continue
if 'normal' in specimen_type:
aliquot = rna_seq_info.get(specimen_type)
for workflow_type in aliquot.keys():
report_info['workflow'] = workflow_type
add_report_info_21_alignment(aliquot.get(workflow_type), report_info, report_info_list)
else:
for aliquot in rna_seq_info.get(specimen_type):
for workflow_type in aliquot.keys():
report_info['workflow'] = workflow_type
add_report_info_21_alignment(aliquot.get(workflow_type), report_info, report_info_list)
return report_info_list
def add_report_info_21_alignment(alignment, report_info, report_info_list):
report_info['aliquot_id'] = alignment.get('aliquot_id')
report_info['icgc_sample_id'] = alignment.get('icgc_sample_id')
report_info['icgc_specimen_id'] = alignment.get('icgc_specimen_id')
report_info['dcc_specimen_type'] = alignment.get('dcc_specimen_type')
if alignment.get('aligned_bam'):
report_info['gnos_id'] = alignment.get('aligned_bam').get('gnos_id')
report_info['gnos_repo'] = alignment.get('aligned_bam').get('gnos_repo')
report_info['exist_bai_file'] = True if alignment.get('aligned_bam').get('bai_file_name') else False
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_19(report_info, report_info_list, es_json, annotations):
report_info['gender'] = set()
report_info['dcc_gender'] = annotations.get('gender').get(es_json.get('donor_unique_id')) if annotations.get('gender').get(es_json.get('donor_unique_id')) else None
if report_info.get('dcc_gender'): report_info['gender'].add(report_info['dcc_gender'])
for vcf in ['sanger', 'dkfz_embl', 'broad', 'muse']:
report_info[vcf+'_gender'] = None
if es_json.get('variant_calling_results') and es_json.get('variant_calling_results').get(vcf+'_variant_calling'):
if es_json.get('variant_calling_results').get(vcf+'_variant_calling').get('workflow_details') and \
es_json.get('variant_calling_results').get(vcf+'_variant_calling').get('workflow_details').get('variant_qc_metrics'):
v = list(gen_dict_extract('gender', es_json.get('variant_calling_results').get(vcf+'_variant_calling').get('workflow_details').get('variant_qc_metrics')))
report_info[vcf+'_gender'] = get_mapping(v[0]) if v else None
if report_info.get(vcf+'_gender'): report_info['gender'].add(report_info[vcf+'_gender'])
report_info['exist_gender_discrepancy'] = False if len(report_info['gender']) == 1 else True
report_info_list.append(copy.deepcopy(report_info))
def add_report_info_14_15(report_info, report_info_list, es_json, workflow_type):
report_info['tumor_aliquot_ids'] = es_json.get('all_tumor_specimen_aliquots')
if es_json.get('variant_calling_results') and es_json.get('variant_calling_results').get(workflow_type+'_variant_calling'):
report_info['gnos_repo'] = es_json.get('variant_calling_results').get(workflow_type+'_variant_calling').get('gnos_repo')
report_info['gnos_id'] = es_json.get('variant_calling_results').get(workflow_type+'_variant_calling').get('gnos_id')
report_info['gnos_last_modified'] = es_json.get('variant_calling_results').get(workflow_type+'_variant_calling').get('gnos_last_modified')
report_info['vcf_workflow_result_version'] = es_json.get('variant_calling_results').get(workflow_type+'_variant_calling').get('vcf_workflow_result_version')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_12(report_info, report_info_list, es_json, annotations):
report_info['is_sanger_variant_calling_performed'] = es_json.get('flags').get('is_sanger_variant_calling_performed')
report_info['is_dkfz_embl_variant_calling_performed'] = es_json.get('flags').get('is_dkfz_embl_variant_calling_performed')
report_info['dkfz_embl_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('dkfz_embl_variant_calling').get('gnos_id') if es_json.get('flags').get('is_dkfz_embl_variant_calling_performed') else None
report_info['dkfz_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('dkfz_variant_calling').get('gnos_id') if es_json.get('flags').get('is_dkfz_variant_calling_performed') else None
report_info['embl_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('embl_variant_calling').get('gnos_id') if es_json.get('flags').get('is_embl_variant_calling_performed') else None
report_info['is_broad_variant_calling_performed'] = es_json.get('flags').get('is_broad_variant_calling_performed')
report_info['broad_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('broad_variant_calling').get('gnos_id') if es_json.get('flags').get('broad').get('broad_file_subset_exist') else None
report_info['broad_tar_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('broad_tar_variant_calling').get('gnos_id') if es_json.get('flags').get('broad').get('broad_tar_file_subset_exist') else None
report_info['muse_variant_calling_gnos_id'] = es_json.get('variant_calling_results').get('muse_variant_calling').get('gnos_id') if es_json.get('flags').get('broad').get('muse_file_subset_exist') else None
if es_json.get('normal_alignment_status'):
add_report_info_12_aliquot(es_json.get('normal_alignment_status'), report_info, report_info_list, annotations)
if es_json.get('tumor_alignment_status'):
for aliquot in es_json.get('tumor_alignment_status'):
add_report_info_12_aliquot(aliquot, report_info, report_info_list, annotations)
return report_info_list
def add_report_info_12_aliquot(aliquot, report_info, report_info_list, annotations):
report_info['library_strategy'] = 'WGS'
report_info['aliquot_id'] = aliquot.get('aliquot_id')
report_info['submitter_specimen_id'] = aliquot.get('submitter_specimen_id')
report_info['submitter_sample_id'] = aliquot.get('submitter_sample_id')
report_info['dcc_specimen_type'] = aliquot.get('dcc_specimen_type')
report_info['aligned'] = aliquot.get('aligned')
report_info['total_lanes'] = aliquot.get('lane_count')
report_info['aligned_bam_gnos_id'] = None
report_info['aligned_bam_gnos_repo'] = []
if aliquot.get('aligned_bam'):
report_info['aligned_bam_gnos_id'] = aliquot.get('aligned_bam').get('gnos_id')
report_info['aligned_bam_gnos_repo'] = aliquot.get('aligned_bam').get('gnos_repo')
report_info['bam_with_unmappable_reads_gnos_id'] = None
report_info['bam_with_unmappable_reads_gnos_repo'] = []
if aliquot.get('bam_with_unmappable_reads'):
report_info['bam_with_unmappable_reads_gnos_id'] = aliquot.get('bam_with_unmappable_reads').get('gnos_id')
report_info['bam_with_unmappable_reads_gnos_repo'] = aliquot.get('bam_with_unmappable_reads').get('gnos_repo')
if aliquot.get('unaligned_bams'):
report_info['entity_type'] = 'unaligned_bams'
for unaligned_bams in aliquot.get('unaligned_bams'):
report_info['unaligned_bams_gnos_id'] = unaligned_bams.get('gnos_id')
report_info['is_reheader'] = True if unaligned_bams.get('gnos_id') in annotations.get('esad-uk_reheader_uuid') else False
for gnos_repo in unaligned_bams.get('gnos_repo'):
report_info['unaligned_bams_gnos_repo'] = gnos_repo
report_info['unaligned_bams_gnos_metadata_url'] = gnos_repo + 'cghub/metadata/analysisFull/' + report_info['unaligned_bams_gnos_id']
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
# def add_report_info_11(report_info, report_info_list, es_json):
# report_info['tumor_aliquot_ids'] = es_json.get('all_tumor_specimen_aliquots')
# if es_json.get('variant_calling_results') and es_json.get('variant_calling_results').get('dkfz_embl_variant_calling'):
# report_info['gnos_repo'] = es_json.get('variant_calling_results').get('dkfz_embl_variant_calling').get('gnos_repo')
# report_info['gnos_id'] = es_json.get('variant_calling_results').get('dkfz_embl_variant_calling').get('gnos_id')
# report_info['gnos_last_modified'] = es_json.get('variant_calling_results').get('dkfz_embl_variant_calling').get('gnos_last_modified')
# report_info_list.append(copy.deepcopy(report_info))
# return report_info_list
def add_report_info_9(report_info, report_info_list, es_json):
report_info['tumor_aliquot_ids'] = es_json.get('all_tumor_specimen_aliquots')
if es_json.get('variant_calling_results'):
vcf = es_json.get('variant_calling_results')
for workflow in ['sanger', 'embl', 'dkfz', 'dkfz_embl', 'broad', 'muse', 'broad_tar']:
if vcf.get(workflow+'_variant_calling') and vcf.get(workflow+'_variant_calling').get('exists_' + workflow + '_file_prefix_mismatch'):
report_info['workflow_name'] = workflow+'_variant_calling'
report_info['gnos_repo'] = vcf.get(workflow+'_variant_calling').get('gnos_repo')[0]
report_info['gnos_id'] = vcf.get(workflow+'_variant_calling').get('gnos_id')
file_prefix = set()
for f in vcf.get(workflow+'_variant_calling').get('files'):
file_prefix.add(f.get('file_name').split('.')[0])
report_info['file_prefix'] = file_prefix
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_8_aliquot(aliquot, report_info, report_info_list):
if aliquot.get('exist_specimen_type_mismatch'):
report_info['aliquot_id'] = aliquot.get('aliquot_id')
report_info['submitter_specimen_id'] = aliquot.get('submitter_specimen_id')
report_info['submitter_sample_id'] = aliquot.get('submitter_sample_id')
report_info['dcc_specimen_type'] = aliquot.get('dcc_specimen_type')
report_info['aligned'] = True if aliquot.get('aligned') else False
report_info['exist_aligned_bam_specimen_type_mismatch'] = aliquot.get('exist_aligned_bam_specimen_type_mismatch')
report_info['exist_unaligned_bam_specimen_type_mismatch'] = aliquot.get('exist_unaligned_bam_specimen_type_mismatch')
report_info['exist_bam_with_unmappable_reads_specimen_type_mismatch'] = aliquot.get('exist_bam_with_unmappable_reads_specimen_type_mismatch')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_8(report_info, report_info_list, es_json):
if es_json.get('normal_alignment_status'):
add_report_info_8_aliquot(es_json.get('normal_alignment_status'), report_info, report_info_list)
if es_json.get('tumor_alignment_status'):
for aliquot in es_json.get('tumor_alignment_status'):
add_report_info_8_aliquot(aliquot, report_info, report_info_list)
return report_info_list
def add_report_info_6_7(report_info, report_info_list, es_json):
report_info['tumor_aliquot_ids'] = es_json.get('aligned_tumor_specimen_aliquots')
if es_json.get('variant_calling_results'):
vcf = es_json.get('variant_calling_results')
for workflow_type in ['broad', 'broad_tar', 'muse']:
report_info[workflow_type+'_gnos_repo'] = vcf.get(workflow_type+'_variant_calling').get('gnos_repo')[0] if vcf.get(workflow_type+'_variant_calling') else None
report_info[workflow_type+'_gnos_id'] = vcf.get(workflow_type+'_variant_calling').get('gnos_id') if vcf.get(workflow_type+'_variant_calling') else None
report_info[workflow_type+'_gnos_last_modified'] = vcf.get(workflow_type+'_variant_calling').get('gnos_last_modified')[0].split('T')[0] if vcf.get(workflow_type+'_variant_calling') else None
report_info['is_cross_referencing_mismatch'] = es_json.get('flags').get('broad').get('exist_file_subsets_mismatch')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_5(report_info, report_info_list, es_json):
if es_json.get('normal_alignment_status'):
aliquot = es_json.get('normal_alignment_status')
add_report_info_5_aliquot(aliquot, report_info, report_info_list, 'wgs_bwa_alignment')
if es_json.get('tumor_alignment_status'):
for aliquot in es_json.get('tumor_alignment_status'):
add_report_info_5_aliquot(aliquot, report_info, report_info_list, 'wgs_bwa_alignment')
if es_json.get('variant_calling_results'):
for k, v in es_json.get('variant_calling_results').iteritems():
add_report_info_5_aliquot(v, report_info, report_info_list, k)
if es_json.get('rna_seq').get('alignment'):
for k, v in es_json.get('rna_seq').get('alignment').iteritems():
if not v: continue
if k=='normal':
for key, value in v.iteritems():
workflow = 'rna_seq_'+key+'_alignment'
add_report_info_5_aliquot(value, report_info, report_info_list, workflow)
if k=='tumor':
for aliquot in v:
for key, value in aliquot.iteritems():
workflow = 'rna_seq_'+key+'_alignment'
add_report_info_5_aliquot(value, report_info, report_info_list, workflow)
return report_info_list
def add_report_info_5_aliquot(aliquot, report_info, report_info_list, workflow):
if aliquot.get('exists_xml_md5sum_mismatch'):
report_info['aliquot_id'] = aliquot.get('aliquot_id') if workflow.endswith('alignment') else None
report_info['dcc_specimen_type'] = aliquot.get('dcc_specimen_type') if workflow.endswith('alignment') else None
report_info['workflow'] = workflow
report_info['gnos_repo'] = aliquot.get('aligned_bam').get('gnos_repo') if workflow.endswith('alignment') else aliquot.get('gnos_repo')
report_info['gnos_id'] = aliquot.get('aligned_bam').get('gnos_id') if workflow.endswith('alignment') else aliquot.get('gnos_id')
report_info['effective_xml_md5sum'] = aliquot.get('aligned_bam').get('effective_xml_md5sum') if workflow.endswith('alignment') else aliquot.get('effective_xml_md5sum')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_4_10(report_info, report_info_list, es_json, flag):
if es_json.get('bam_files'):
for bam in es_json.get('bam_files'):
if not bam.get(flag):
continue
report_info['gnos_id'] = bam.get('bam_gnos_ao_id')
report_info_list.append(copy.deepcopy(report_info))
if es_json.get('variant_calling_results'):
vcf = es_json.get('variant_calling_results')
for workflow in ['sanger', 'dkfz_embl', 'broad', 'muse', 'broad_tar', 'minibam']:
if vcf.get(workflow+'_variant_calling') and vcf.get(workflow+'_variant_calling').get(flag):
report_info['gnos_id'] = vcf.get(workflow+'_variant_calling').get('gnos_id')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_0(report_info, report_info_list, es_json):
report_info['gnos_id'] = es_json.get('variant_calling_results').get('sanger_variant_calling').get('gnos_id')
report_info['study'] = es_json.get('variant_calling_results').get('sanger_variant_calling').get('study')
for gnos_repo in es_json.get('variant_calling_results').get('sanger_variant_calling').get('gnos_repo'):
report_info['gnos_repo'] = gnos_repo
report_info['gnos_metadata_url'] = gnos_repo + 'cghub/metadata/analysisFull/' + report_info['gnos_id']
report_info_list.append(copy.deepcopy(report_info))
def add_report_info_1(report_info, report_info_list, es_json):
if es_json.get('normal_alignment_status'):
add_report_info_1_aliquot(es_json.get('normal_alignment_status'), report_info, report_info_list)
if es_json.get('tumor_alignment_status'):
for aliquot in es_json.get('tumor_alignment_status'):
add_report_info_1_aliquot(aliquot, report_info, report_info_list)
return report_info_list
def add_report_info_1_aliquot(aliquot, report_info, report_info_list):
report_info['aliquot_id'] = aliquot.get('aliquot_id')
report_info['submitter_specimen_id'] = aliquot.get('submitter_specimen_id')
report_info['submitter_sample_id'] = aliquot.get('submitter_sample_id')
report_info['dcc_specimen_type'] = aliquot.get('dcc_specimen_type')
report_info['aligned'] = True if aliquot.get('aligned') else False
if not aliquot.get('do_lane_count_and_bam_count_match') or not aliquot.get('do_lane_counts_in_every_bam_entry_match'):
report_info['number_of_bams'] = len(aliquot.get('unaligned_bams'))
report_info['total_lanes'] = aliquot.get('lane_count')
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_2(report_info, report_info_list, es_json):
if es_json.get('variant_calling_results'):
vcf = es_json.get('variant_calling_results')
report_info['normal_bam_gnos_id'] = es_json.get('normal_alignment_status').get('aligned_bam').get('gnos_id') if es_json.get('normal_alignment_status').get('aligned_bam') else None
report_info['tumor_bam_gnos_id'] = []
for bam in es_json.get('tumor_alignment_status'):
if bam.get('aligned_bam'):
report_info['tumor_bam_gnos_id'].append(bam.get('aligned_bam').get('gnos_id'))
for workflow in ['sanger', 'embl', 'dkfz', 'dkfz_embl', 'broad', 'muse', 'broad_tar']:
if vcf.get(workflow+'_variant_calling') and vcf.get(workflow+'_variant_calling').get('is_bam_used_by_' + workflow + '_missing'):
report_info['workflow_name'] = vcf.get(workflow+'_variant_calling').get('workflow_details').get('variant_workflow_name')
report_info['vcf_gnos_repo'] = vcf.get(workflow+'_variant_calling').get('gnos_repo')[0]
report_info['vcf_gnos_id'] = vcf.get(workflow+'_variant_calling').get('gnos_id')
report_info['used_normal_bam_gnos_id'] = None
report_info['used_normal_bam_gnos_url'] = None
report_info['is_normal_bam_used_by_vcf_missing'] = vcf.get(workflow+'_variant_calling').get('is_normal_bam_used_by_'+workflow+'_missing')
report_info['used_tumor_bam_gnos_id'] = []
report_info['used_tumor_bam_gnos_url'] = []
report_info['is_tumor_bam_used_by_vcf_missing'] = vcf.get(workflow+'_variant_calling').get('is_tumor_bam_used_by_'+workflow+'_missing')
for vcf_input in vcf.get(workflow+'_variant_calling').get('workflow_details').get('variant_pipeline_input_info'):
if 'normal' in vcf_input.get('attributes').get('dcc_specimen_type').lower():
report_info['used_normal_bam_gnos_id'] = vcf_input.get('attributes').get('analysis_id')
report_info['used_normal_bam_gnos_url'] = vcf_input.get('attributes').get('analysis_url')
if 'tumour' in vcf_input.get('attributes').get('dcc_specimen_type').lower():
report_info['used_tumor_bam_gnos_id'].append(vcf_input.get('attributes').get('analysis_id'))
report_info['used_tumor_bam_gnos_url'].append(vcf_input.get('attributes').get('analysis_url'))
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def add_report_info_3(report_info, report_info_list, es_json):
duplicate_bwa_bams = es_json.get('duplicated_bwa_alignment_summary')
if duplicate_bwa_bams.get('exists_mismatch_bwa_bams_in_normal'):
aliquot = duplicate_bwa_bams.get('normal')
add_report_info_3_aliquot(aliquot, report_info, report_info_list)
if duplicate_bwa_bams.get('exists_mismatch_bwa_bams_in_tumor'):
for aliquot in duplicate_bwa_bams.get('tumor'):
add_report_info_3_aliquot(aliquot, report_info, report_info_list)
def add_report_info_3_aliquot(aliquot, report_info, report_info_list):
if aliquot.get('exists_mismatch_bwa_bams'):
report_info['aliquot_id'] = aliquot.get('aliquot_id')
report_info['dcc_specimen_type'] = aliquot.get('dcc_specimen_type')
report_info['exists_gnos_id_mismatch'] = aliquot.get('exists_gnos_id_mismatch')
report_info['exists_md5sum_mismatch'] = aliquot.get('exists_md5sum_mismatch')
report_info['exists_version_mismatch'] = aliquot.get('exists_version_mismatch')
aligned_bam = aliquot.get('aligned_bam')
# find gnos_id which are train2_bams
report_info['train2_bams_gnos_id'] = set()
report_info['gnos_id_to_be_reassigned_as_train2_bam'] = set()
report_info['gnos_id_to_keep'] = set()
report_info['gnos_id_to_be_removed'] = set()
gnos_id_all = set()
gnos_id_without_sanger_call = []
for bam in aligned_bam:
gnos_id_all.add(bam.get('gnos_id'))
if bam.get('is_train2_bam'):
report_info['train2_bams_gnos_id'].add(bam.get('gnos_id'))
report_info['gnos_id_to_keep'].add(bam.get('gnos_id'))
else:
if bam.get('is_used_in_sanger_variant_call'):
report_info['gnos_id_to_be_reassigned_as_train2_bam'].add(bam.get('gnos_id'))
report_info['gnos_id_to_keep'].add(bam.get('gnos_id'))
else:
gnos_id_without_sanger_call.append(bam.get('gnos_id'))
if not report_info['gnos_id_to_keep']:
max_num = max(map(gnos_id_without_sanger_call.count, gnos_id_without_sanger_call))
gnos_id_tmp = [x for x in gnos_id_without_sanger_call if gnos_id_without_sanger_call.count(x) == max_num]
gnos_id_tmp = gnos_id_tmp[0]
report_info['gnos_id_to_be_reassigned_as_train2_bam'].add(gnos_id_tmp)
report_info['gnos_id_to_keep'].add(gnos_id_tmp)
report_info['gnos_id_to_be_removed'] = gnos_id_all - report_info['gnos_id_to_keep']
report_info_list.append(copy.deepcopy(report_info))
return report_info_list
def init_report_dir(metadata_dir, report_name, repo):
report_dir = metadata_dir + '/reports/' + report_name if not repo else metadata_dir + '/reports/' + report_name + '/' + repo
    if not os.path.exists(report_dir): os.makedirs(report_dir)  # create the report folder if it does not exist yet
return report_dir
def read_annotations(annotations, type, file_name):
if not os.path.isfile(file_name):
return
with open(file_name, 'r') as r:
if annotations.get(type): # reset annotation if exists
del annotations[type]
if type == 'esad-uk_reheader_uuid':
annotations[type] = set()
for line in r:
if line.startswith('#'): continue
if len(line.rstrip()) == 0: continue
annotations[type].add(line.rstrip())
elif type == 'gender':
annotations[type] = {}
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
# if not row.get('study_donor_involved_in') == 'PCAWG': continue
if not row.get('project_code') or not row.get('submitted_donor_id'): continue
annotations[type][row.get('project_code')+'::'+row.get('submitted_donor_id')] = row.get('donor_sex') if row.get('donor_sex') else None
elif type == 'gender_update':
reader = csv.DictReader(r, delimiter='\t')
for row in reader:
if not row.get('donor_unique_id'): continue
annotations['gender'][row.get('donor_unique_id')] = row.get('DCC ICGC21 submitted donor_sex') if row.get('DCC ICGC21 submitted donor_sex') else None
else:
print('unknown annotation type: {}'.format(type))
return annotations
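# Illustrative sketch, not part of the original script: after the calls made in
# main() below, the annotations dict would look roughly like
#
#   {'esad-uk_reheader_uuid': {'<gnos_id>', ...},
#    'gender': {'<project_code>::<submitted_donor_id>': 'male'/'female'/None, ...}}
#
# with the 'gender_update' file overwriting individual annotations['gender']
# entries keyed by donor_unique_id.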
def main(argv=None):
parser = ArgumentParser(description="Get Donor Info For Specific Query",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=True)
parser.add_argument("-r", "--gnos_repo", dest="repo",
help="Specify which GNOS repo to process, process all repos if none specified", required=False)
parser.add_argument("-q", "--ES_query", dest="q_index",
help="Specify which ES_query to be used", required=False)
args = parser.parse_args()
metadata_dir = args.metadata_dir # this dir contains gnos manifest files, will also host all reports
repo = args.repo
q_index = args.q_index
if not os.path.isdir(metadata_dir): # TODO: should add more directory name check to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
q_index = range(len(es_queries)) if not q_index else [int(q_index)]
    timestamp = metadata_dir.split('/')[-1]
es_index = 'p_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1)
es_type = "donor"
es_host = 'localhost:9200'
es = Elasticsearch([es_host], timeout=600)
# output result
report_name = re.sub(r'^generate_', '', os.path.basename(__file__))
report_name = re.sub(r'\.py$', '', report_name)
report_dir = init_report_dir(metadata_dir, report_name, repo)
annotations = {}
annotations = read_annotations(annotations, 'esad-uk_reheader_uuid', 'esad-uk_uuids.txt')
annotations = read_annotations(annotations, 'gender', '../pcawg-operations/lists/donor.all_projects.release20.tsv')
annotations = read_annotations(annotations, 'gender_update', '../pcawg-operations/lists/donor.gender_update.release21.tsv')
for q in q_index:
report_tsv_fh = open(report_dir + '/' + es_queries[q].get('name') + '.txt', 'w')
# get the list of donors
donors_list = get_donors_list(es, es_index, es_queries, q)
report_info_list_full = []
for donor_unique_id in donors_list:
# get json doc for each donor
es_json = get_donor_json(es, es_index, donor_unique_id)
report_info_list_donor = create_report_info(donor_unique_id, es_json, q, annotations)
report_info_list_full.extend(report_info_list_donor)
        # diff against the release entry list for the 'missing gnos entry' queries
if q in [4, 10, 13, 16, 20]:
if q==4:
release_tsv = '../pcawg-operations/data_releases/santa_cruz/santa_cruz_freeze_entry.tsv'
elif q==10:
release_tsv = '../pcawg-operations/data_releases/aug2015/release_aug2015_entry.tsv'
elif q==13:
release_tsv = '../pcawg-operations/data_releases/oct2015/release_oct2015_entry.tsv'
elif q==16:
release_tsv = '../pcawg-operations/data_releases/mar2016/release_mar2016_entry.tsv'
elif q==20:
release_tsv = '../pcawg-operations/data_releases/may2016/release_may2016_entry.tsv'
else:
print('No entry for this query!')
# generate the set of gnos_id
gnos_id_set = set([l.get('gnos_id') for l in report_info_list_full])
report_info_list_full = []
            # read the benchmark release entry list; the TSV locations are hardcoded above
with open(release_tsv, 'r') as s:
reader = csv.DictReader(s, delimiter='\t')
for row in reader:
if not row.get('gnos_id') in gnos_id_set:
row_order = OrderedDict()
for fn in reader.fieldnames:
row_order[fn.strip('#')] = row.get(fn)
report_info_list_full.append(row_order)
report_info_list_full.sort(key=itemgetter('donor_unique_id'))
header = True
for r in report_info_list_full:
if header:
report_tsv_fh.write('\t'.join(r.keys()) + '\n')
header = False
# make the list of output from dict
line = []
for p in r.keys():
if isinstance(r.get(p), list):
line.append('|'.join(r.get(p)))
elif isinstance(r.get(p), set):
line.append('|'.join(list(r.get(p))))
elif r.get(p) is None:
line.append('')
else:
line.append(str(r.get(p)))
report_tsv_fh.write('\t'.join(line) + '\n')
report_tsv_fh.close()
return 0
if __name__ == "__main__":
sys.exit(main())
|
ICGC-TCGA-PanCancer/pcawg-central-index
|
pcawg_metadata_parser/generate_QC_reports.py
|
Python
|
gpl-2.0
| 63,250
|
[
"BWA"
] |
b500860c33a285df54eda75f22c9ec6375aa5e5e80aa0ab80f9f8334ea69ca0b
|
"""
Painless environment setup for acceptance tests. Powered by behave.
Visit the docs at
https://behave.readthedocs.io/en/latest/tutorial.html#environmental-controls
"""
from contextlib import contextmanager
from os import chdir, getcwd, system
from os.path import dirname, join
from shutil import rmtree
from tempfile import mkdtemp
def before_all(context):
"""
Before the first test starts, find out and create directory paths we want
to use.
"""
@contextmanager
def safe_chdir(path):
"""Restore the original directory when leaving the with-clause"""
old_path = getcwd()
chdir(path)
try:
yield
finally:
chdir(old_path)
def set_logfilename(name):
"""Set the logfile context value using for logging system calls"""
context.logfile = join(context.temp_dir, name + '.log')
def log_run(command):
"""Run system commands, log their output, return the exit status"""
context.exit_code = system('{command} > {logfile} 2>&1'.format(
command=command,
logfile=context.logfile,
))
with open(context.logfile) as logfile:
context.log = logfile.read()
return context.exit_code
def explain_log(message):
"""Helper function for assertions"""
return '{message}\n' \
'----------------- (log follows)\n' \
'{log}'.format(message=message, log=context.log)
context.safe_chdir = safe_chdir
context.set_logfilename = set_logfilename
context.log_run = log_run
context.explain_log = explain_log
context.project_dir = dirname(dirname(dirname(__file__)))
context.temp_dir = mkdtemp(prefix='painless-acceptance-tests-')
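# Illustrative sketch, not part of the original file: a behave step implementation
# could use the helpers wired onto the context above (assuming `from behave import when`
# and a hypothetical cookiecutter template), e.g.
#
#   @when('the scaffold is generated')
#   def step_impl(context):
#       with context.safe_chdir(context.temp_dir):
#           context.set_logfilename('generate')
#           exit_code = context.log_run('cookiecutter --no-input some-template')
#           assert exit_code == 0, context.explain_log('generation failed')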
def before_scenario(context, scenario):
pass
def after_scenario(context, scenario):
"""
Clean up cookiecutter data after each scenario.
"""
if context.generated_dir:
rmtree(context.generated_dir)
def after_all(context):
"""
After all tests, do cleanup work.
"""
rmtree(context.temp_dir)
|
painless-software/painless-continuous-delivery
|
tests/acceptance/environment.py
|
Python
|
apache-2.0
| 2,100
|
[
"VisIt"
] |
b2b8e449d29175e08629771911b14f49de46edefe08fec2ec34ba877f72cec98
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import types
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import sigmoid
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('output', DenseLayer)]
net0 = NeuralNet(layers=layers0,
input_shape=(None, 2),
dense0_num_units=5,
dense0_nonlinearity = sigmoid,
output_num_units=1,
output_nonlinearity=sigmoid,
update=nesterov_momentum,
update_learning_rate=0.5,
update_momentum=0.9,
regression=True,
eval_size=None,
verbose=1,
max_epochs=200)
X = [ [0,0], [0,1], [1,0], [1,1] ]
y = [ [0.0], [1.0], [1.0], [0.0] ]
def my_split(self, X, y, eval_size):
return X,X,y,y
net0.train_test_split = types.MethodType(my_split, net0)
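# Added note: the override above replaces nolearn's train/validation split so that
# the full four-sample XOR set is used both for training and for evaluation, which
# is the usual choice for a toy problem this small.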
net0.fit(X,y)
pred_y = net0.predict(X)
for element in zip(X,y,pred_y):
print("Input: {}, Ideal: {}, Actual: {}".format(element[0],element[1],element[2]))
|
PeterLauris/aifh
|
vol3/vol3-python-examples/examples/example_xor.py
|
Python
|
apache-2.0
| 2,029
|
[
"VisIt"
] |
e4a73ee0dba42cba3b48bd0ed52736511099ed95ea934453e9fa69d9b63ab2c5
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from sympy.printing.cxxcode import CXX11CodePrinter
class MooseFunctionPrinter(CXX11CodePrinter):
"""sympy printer for MOOSE C++ Function objects."""
def _print_BaseScalar(self, expr):
"""
Print p(0), p(1), p(2) instead of R.x, R.y, or R.z for inserting into value method
see sympy/sympy/vector/scalar.py
"""
return 'p({})'.format(expr._id[0])
def _print_Symbol(self, expr):
"""
Print _u instead of u, following the MOOSE member variable convention.
"""
s = str(expr)
if s.endswith(('_x', '_y', '_z')):
return '_{}({})'.format(s[:-2], 'xyz'.index(s[-1]))
elif s != 't':
return '_{}'.format(expr)
else:
return str(expr)
def moosefunction(expr, assign_to=None, **kwargs):
"""
    Converts an expr to a MOOSE C++ expression for Function objects.
Inputs:
expr[sympy.core.Expr]: a sympy expression to be converted
"""
return MooseFunctionPrinter(**kwargs).doprint(expr, assign_to)
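# Illustrative sketch, not part of the original module; the exact output is an
# assumption based on the printer rules above (plain symbols become _name, 't' is kept):
#
#   import sympy
#   a, b, t = sympy.symbols('a b t')
#   moosefunction(a*t + b)   # expected to return something like '_a*t + _b'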
def print_moose(expr, **kwargs):
"""
Prints a C++ expression for a Function object.
"""
print(moosefunction(expr, **kwargs))
|
nuclear-wizard/moose
|
python/mms/moosefunction.py
|
Python
|
lgpl-2.1
| 1,501
|
[
"MOOSE"
] |
b4828ffef7e72816374a1220fab531765c3d4e65e92f955bcb9089f6b1549263
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Core module for Pelix.
Pelix is a Python framework that aims to act as an OSGi framework as much as possible
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import imp
import importlib
import inspect
import logging
import os
import pkgutil
import sys
import threading
import uuid
# Pelix beans and constants
from pelix.constants import ACTIVATOR, ACTIVATOR_LEGACY, FRAMEWORK_UID, \
BundleException, FrameworkException
from pelix.internals.events import BundleEvent, ServiceEvent
from pelix.internals.registry import EventDispatcher, ServiceRegistry, \
ServiceReference, ServiceRegistration
# Pelix utility modules
from pelix.utilities import is_string
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Prepare the module logger
_logger = logging.getLogger("pelix.main")
# ------------------------------------------------------------------------------
class Bundle(object):
"""
Represents a "bundle" in Pelix
"""
UNINSTALLED = 1
""" The bundle is uninstalled and may not be used """
INSTALLED = 2
""" The bundle is installed but not yet resolved """
RESOLVED = 4
""" The bundle is resolved and is able to be started """
STARTING = 8
""" The bundle is in the process of starting """
STOPPING = 16
""" The bundle is in the process of stopping """
ACTIVE = 32
""" The bundle is now running """
def __init__(self, framework, bundle_id, name, module):
"""
Sets up the bundle descriptor
:param framework: The host framework
:param bundle_id: The ID of the bundle in the host framework
:param name: The bundle symbolic name
:param module: The bundle module
"""
# A reentrant lock for synchronization
self._lock = threading.RLock()
# Bundle
self.__context = BundleContext(framework, self)
self.__id = bundle_id
self.__module = module
self.__name = name
self.__framework = framework
self._state = Bundle.RESOLVED
# Registered services
self.__registered_services = set()
self.__registration_lock = threading.Lock()
def __str__(self):
"""
String representation
"""
return "Bundle(ID={0}, Name={1})".format(self.__id, self.__name)
def __get_activator_method(self, method_name):
"""
Retrieves the requested method of the activator, or returns None
:param method_name: A method name
:return: A method, or None
"""
# Get the activator
activator = getattr(self.__module, ACTIVATOR, None)
if activator is None:
# Get the old activator
activator = getattr(self.__module, ACTIVATOR_LEGACY, None)
if activator is not None:
                # Old activator found: log a deprecation warning
_logger.warning("Bundle %s uses the deprecated '%s' to declare"
" its activator. "
"Use @BundleActivator instead.",
self.__name, ACTIVATOR_LEGACY)
return getattr(activator, method_name, None)
def _fire_bundle_event(self, kind):
"""
Fires a bundle event of the given kind
:param kind: Kind of event
"""
self.__framework._dispatcher.fire_bundle_event(BundleEvent(kind, self))
def _registered_service(self, registration):
"""
Bundle is notified by the framework that a service has been registered
in the name of this bundle.
:param registration: The service registration object
"""
with self.__registration_lock:
self.__registered_services.add(registration)
def _unregistered_service(self, registration):
"""
Bundle is notified by the framework that a service has been
unregistered in the name of this bundle.
:param registration: The service registration object
"""
with self.__registration_lock:
self.__registered_services.discard(registration)
def get_bundle_context(self):
"""
Retrieves the bundle context
:return: The bundle context
"""
return self.__context
def get_bundle_id(self):
"""
Retrieves the bundle ID
:return: The bundle ID
"""
return self.__id
def get_location(self):
"""
Retrieves the location of this module
:return: The location of the Pelix module, or an empty string
"""
return getattr(self.__module, '__file__', "")
def get_module(self):
"""
Retrieves the Python module corresponding to the bundle
:return: The Python module
"""
return self.__module
def get_registered_services(self):
"""
Returns this bundle's ServiceReference list for all services it has
registered or an empty list
The list is valid at the time of the call to this method, however, as
the Framework is a very dynamic environment, services can be modified
or unregistered at any time.
:return: An array of ServiceReference objects
:raise BundleException: If the bundle has been uninstalled
"""
if self._state == Bundle.UNINSTALLED:
raise BundleException("Can't call 'get_registered_services' on an "
"uninstalled bundle")
return self.__framework._registry.get_bundle_registered_services(self)
def get_services_in_use(self):
"""
Returns this bundle's ServiceReference list for all services it is
using or an empty list.
A bundle is considered to be using a service if its use count for that
service is greater than zero.
The list is valid at the time of the call to this method, however, as
the Framework is a very dynamic environment, services can be modified
or unregistered at any time.
:return: An array of ServiceReference objects
:raise BundleException: If the bundle has been uninstalled
"""
if self._state == Bundle.UNINSTALLED:
raise BundleException("Can't call 'get_services_in_use' on an "
"uninstalled bundle")
return self.__framework._registry.get_bundle_imported_services(self)
def get_state(self):
"""
Retrieves the bundle state
:return: The bundle state
"""
return self._state
def get_symbolic_name(self):
"""
Retrieves the bundle symbolic name (its Python module name)
:return: The bundle symbolic name
"""
return self.__name
def get_version(self):
"""
Retrieves the bundle version, using the ``__version__`` or
``__version_info__`` attributes of its module.
:return: The bundle version, "0.0.0" by default
"""
# Get the version value
version = getattr(self.__module, "__version__", None)
if version:
return version
# Convert the __version_info__ entry
info = getattr(self.__module, "__version_info__", None)
if info:
return ".".join(str(part) for part in __version_info__)
# No version
return "0.0.0"
def start(self):
"""
Starts the bundle. Does nothing if the bundle is already starting or
active.
:raise BundleException: The framework is not yet started or the bundle
activator failed.
"""
if self.__framework._state not in (Bundle.STARTING, Bundle.ACTIVE):
# Framework is not running
raise BundleException("Framework must be started before its "
"bundles")
with self._lock:
if self._state in (Bundle.ACTIVE, Bundle.STARTING):
# Already started bundle, do nothing
return
# Store the bundle current state
previous_state = self._state
# Starting...
self._state = Bundle.STARTING
self._fire_bundle_event(BundleEvent.STARTING)
# Call the activator, if any
starter = self.__get_activator_method('start')
if starter is not None:
try:
# Call the start method
starter(self.__context)
except (FrameworkException, BundleException):
# Restore previous state
self._state = previous_state
# Re-raise directly Pelix exceptions
_logger.exception("Pelix error raised by %s while "
"starting", self.__name)
raise
except Exception as ex:
# Restore previous state
self._state = previous_state
# Raise the error
_logger.exception("Error raised by %s while starting",
self.__name)
raise BundleException(ex)
# Bundle is now active
self._state = Bundle.ACTIVE
self._fire_bundle_event(BundleEvent.STARTED)
def stop(self):
"""
Stops the bundle. Does nothing if the bundle is already stopped.
:raise BundleException: The bundle activator failed.
"""
if self._state != Bundle.ACTIVE:
# Invalid state
return
exception = None
with self._lock:
# Store the bundle current state
previous_state = self._state
# Stopping...
self._state = Bundle.STOPPING
self._fire_bundle_event(BundleEvent.STOPPING)
# Call the activator, if any
stopper = self.__get_activator_method('stop')
if stopper is not None:
try:
                    # Call the stop method
stopper(self.__context)
except (FrameworkException, BundleException) as ex:
# Restore previous state
self._state = previous_state
# Re-raise directly Pelix exceptions
_logger.exception("Pelix error raised by %s while "
"stopping", self.__name)
exception = ex
except Exception as ex:
_logger.exception("Error raised by %s while stopping",
self.__name)
# Store the exception (raised after service clean up)
exception = BundleException(ex)
# Hide remaining services
self.__framework._hide_bundle_services(self)
            # Intermediate bundle event: the activator should have cleaned up
            # everything, but some elements could remain (iPOPO components, ...)
self._fire_bundle_event(BundleEvent.STOPPING_PRECLEAN)
# Remove remaining services (the hard way)
self.__unregister_services()
# Bundle is now stopped and all its services have been unregistered
self._state = Bundle.RESOLVED
self._fire_bundle_event(BundleEvent.STOPPED)
# Raise the exception, if any
# pylint: disable=E0702
# Pylint seems to miss the "is not None" check below
if exception is not None:
raise exception
def __unregister_services(self):
"""
Unregisters all bundle services
"""
# Copy the services list, as it will be modified during the process
with self.__registration_lock:
registered_services = self.__registered_services.copy()
for registration in registered_services:
try:
registration.unregister()
except BundleException:
# Ignore errors at this level
pass
if self.__registered_services:
_logger.warning("Not all services have been unregistered...")
with self.__registration_lock:
# Clear the list, just to be clean
self.__registered_services.clear()
def uninstall(self):
"""
Uninstalls the bundle
"""
with self._lock:
if self._state == Bundle.ACTIVE:
self.stop()
# Change the bundle state
self._state = Bundle.UNINSTALLED
# Call the framework
self.__framework.uninstall_bundle(self)
def update(self):
"""
Updates the bundle
"""
with self._lock:
            # Was it active?
restart = self._state == Bundle.ACTIVE
# Send the update event
self._fire_bundle_event(BundleEvent.UPDATE_BEGIN)
try:
# Stop the bundle
self.stop()
except:
# Something wrong occurred, notify listeners
self._fire_bundle_event(BundleEvent.UPDATE_FAILED)
raise
# Change the source file age
module_stat = None
module_file = getattr(self.__module, "__file__", None)
if module_file is not None and os.path.isfile(module_file):
try:
module_stat = os.stat(module_file)
# Change modification time to bypass weak time resolution
# of the underlying file system
os.utime(module_file,
(module_stat.st_atime, module_stat.st_mtime + 1))
except OSError:
# Can't touch the file
_logger.warning(
"Failed to update the modification time of '%s'. "
"The bundle update might not reflect the latest "
"changes.", module_file)
# Clean up the module constants (otherwise kept by reload)
# Keep special members (__name__, __file__, ...)
old_content = self.__module.__dict__.copy()
for name in list(self.__module.__dict__):
if not (name.startswith('__') and name.endswith('__')):
del self.__module.__dict__[name]
try:
# Reload the module
imp.reload(self.__module)
except (ImportError, SyntaxError) as ex:
# Exception raised if the file is unreadable
_logger.exception("Error updating %s: %s", self.__name, ex)
# Reset module content
self.__module.__dict__.clear()
self.__module.__dict__.update(old_content)
if module_stat is not None:
try:
# Reset times
os.utime(module_file,
(module_stat.st_atime, module_stat.st_mtime))
except OSError:
# Shouldn't occur, since we succeeded before the update
_logger.debug(
"Failed to reset the modification time of '%s'",
module_file)
if restart:
try:
# Re-start the bundle
self.start()
except:
# Something wrong occurred, notify listeners
self._fire_bundle_event(BundleEvent.UPDATE_FAILED)
raise
# Bundle update finished
self._fire_bundle_event(BundleEvent.UPDATED)
# ------------------------------------------------------------------------------
class Framework(Bundle):
"""
The Pelix framework (main) class. It must be instantiated using
FrameworkFactory
"""
def __init__(self, properties=None):
"""
Sets up the framework.
:param properties: The framework properties
"""
# Framework bundle set up
Bundle.__init__(self, self, 0, self.get_symbolic_name(),
sys.modules[__name__])
# Framework properties
if not isinstance(properties, dict):
self.__properties = {}
else:
# Use a copy of the properties, to avoid external changes
self.__properties = properties.copy()
# Generate a framework instance UUID, if needed
framework_uid = self.__properties.get(FRAMEWORK_UID)
if not framework_uid:
framework_uid = str(uuid.uuid4())
# Normalize the UID: it must be a string
self.__properties[FRAMEWORK_UID] = str(framework_uid)
# Properties lock
self.__properties_lock = threading.Lock()
# Bundles (start at 1, as 0 is reserved for the framework itself)
self.__next_bundle_id = 1
# Bundle ID -> Bundle object
self.__bundles = {}
# Bundles lock
self.__bundles_lock = threading.RLock()
# Event dispatcher
self._dispatcher = EventDispatcher()
# Service registry
self._registry = ServiceRegistry(self)
self.__unregistering_services = {}
# The wait_for_stop event (initially stopped)
self._fw_stop_event = threading.Event()
self._fw_stop_event.set()
def add_property(self, name, value):
"""
Adds a property to the framework **if it is not yet set**.
If the property already exists (same name), then nothing is done.
Properties can't be updated.
:param name: The property name
:param value: The value to set
:return: True if the property was stored, else False
"""
with self.__properties_lock:
if name in self.__properties:
# Already stored property
return False
self.__properties[name] = value
return True
def find_service_references(self, clazz=None, ldap_filter=None,
only_one=False):
"""
Finds all services references matching the given filter.
:param clazz: Class implemented by the service
:param ldap_filter: Service filter
:param only_one: Return the first matching service reference only
        :return: A list of found references, or None
:raise BundleException: An error occurred looking for service
references
"""
return self._registry.find_service_references(clazz, ldap_filter,
only_one)
def get_bundle_by_id(self, bundle_id):
"""
Retrieves the bundle with the given ID
:param bundle_id: ID of an installed bundle
:return: The requested bundle
:raise BundleException: The ID is invalid
"""
if bundle_id == 0:
# "System bundle"
return self
with self.__bundles_lock:
if bundle_id not in self.__bundles:
raise BundleException("Invalid bundle ID {0}"
.format(bundle_id))
return self.__bundles[bundle_id]
def get_bundle_by_name(self, bundle_name):
"""
Retrieves the bundle with the given name
:param bundle_name: Name of the bundle to look for
:return: The requested bundle, None if not found
"""
if bundle_name is None:
# Nothing to do
return None
        if bundle_name == self.get_symbolic_name():
# System bundle requested
return self
with self.__bundles_lock:
for bundle in self.__bundles.values():
if bundle_name == bundle.get_symbolic_name():
# Found !
return bundle
# Not found...
return None
def get_bundles(self):
"""
Returns the list of all installed bundles
:return: the list of all installed bundles
"""
with self.__bundles_lock:
return list(self.__bundles[bundle_id]
for bundle_id in sorted(self.__bundles.keys()))
def get_properties(self):
"""
Retrieves a copy of the stored framework properties.
"""
with self.__properties_lock:
return self.__properties.copy()
def get_property(self, name):
"""
        Retrieves a framework or system property. As framework properties don't
        change while the framework is running, this method doesn't need to be
        protected.
:param name: The property name
"""
with self.__properties_lock:
return self.__properties.get(name, os.getenv(name))
def get_property_keys(self):
"""
        Returns an array of the keys in the properties of the framework
:return: An array of property keys.
"""
with self.__properties_lock:
return tuple(self.__properties.keys())
def get_service(self, bundle, reference):
"""
Retrieves the service corresponding to the given reference
:param bundle: The bundle requiring the service
:param reference: A service reference
:return: The requested service
:raise BundleException: The service could not be found
:raise TypeError: The argument is not a ServiceReference object
"""
if not isinstance(bundle, Bundle):
raise TypeError("First argument must be a Bundle object")
if not isinstance(reference, ServiceReference):
raise TypeError("Second argument must be a ServiceReference "
"object")
try:
# Unregistering service, just give it
return self.__unregistering_services[reference]
except KeyError:
return self._registry.get_service(bundle, reference)
def get_symbolic_name(self):
"""
Retrieves the framework symbolic name
:return: Always "pelix.framework"
"""
return "pelix.framework"
def install_bundle(self, name, path=None):
"""
Installs the bundle with the given name
*Note:* Before Pelix 0.5.0, this method returned the ID of the
installed bundle, instead of the Bundle object.
        **WARNING:** The behavior of the loading process is subject to change,
        as it does not allow multiple frameworks to run safely in the same
        Python interpreter: they might share global module values.
:param name: A bundle name
:param path: Preferred path to load the module
:return: The installed Bundle object
:raise BundleException: Something happened
"""
with self.__bundles_lock:
# A bundle can't be installed twice
for bundle in self.__bundles.values():
if bundle.get_symbolic_name() == name:
_logger.warning('Already installed bundle: %s', name)
return bundle
# Load the module
try:
if path:
# Use the given path in priority
sys.path.insert(0, path)
try:
# The module has already been loaded
module = sys.modules[name]
except KeyError:
# Load the module
# __import__(name) -> package level
# import_module -> module level
module = importlib.import_module(name)
except (ImportError, IOError) as ex:
# Error importing the module
raise BundleException("Error installing bundle {0}: {1}"
.format(name, ex))
finally:
if path:
# Clean up the path. The loaded module(s) might
# have changed the path content, so do not use an
# index
sys.path.remove(path)
# Add the module to sys.modules, just to be sure
sys.modules[name] = module
# Compute the bundle ID
bundle_id = self.__next_bundle_id
# Prepare the bundle object and its context
bundle = Bundle(self, bundle_id, name, module)
# Store the bundle
self.__bundles[bundle_id] = bundle
# Update the bundle ID counter
self.__next_bundle_id += 1
# Fire the bundle installed event
event = BundleEvent(BundleEvent.INSTALLED, bundle)
self._dispatcher.fire_bundle_event(event)
return bundle
def install_package(self, path, recursive=False, prefix=None):
"""
Installs all the modules found in the given package
:param path: Path of the package (folder)
:param recursive: If True, install the sub-packages too
:param prefix: (**internal**) Prefix for all found modules
:return: A 2-tuple, with the list of installed bundles and the list
of failed modules names
:raise ValueError: Invalid path
"""
if not path:
raise ValueError("Empty path")
elif not is_string(path):
raise ValueError("Path must be a string")
# Use an absolute path
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError("Inexistent path: {0}".format(path))
# Create a simple visitor
def visitor(fullname, is_package, module_path):
"""
Package visitor: accepts everything in recursive mode,
else avoids packages
"""
return recursive or not is_package
# Set up the prefix if needed
if prefix is None:
prefix = os.path.basename(path)
bundles = set()
failed = set()
with self.__bundles_lock:
try:
# Install the package first, resolved from the parent directory
bundles.add(self.install_bundle(prefix, os.path.dirname(path)))
# Visit the package
visited, sub_failed = self.install_visiting(path, visitor,
prefix)
# Update the sets
bundles.update(visited)
failed.update(sub_failed)
except BundleException as ex:
# Error loading the module
_logger.warning("Error loading package %s: %s", prefix, ex)
failed.add(prefix)
return bundles, failed
def install_visiting(self, path, visitor, prefix=None):
"""
Installs all the modules found in the given path if they are accepted
by the visitor.
The visitor must be a callable accepting 3 parameters:
* fullname: The full name of the module
* is_package: If True, the module is a package
* module_path: The path to the module file
:param path: Root search path
:param visitor: The visiting callable
:param prefix: (**internal**) Prefix for all found modules
:return: A 2-tuple, with the list of installed bundles and the list
of failed modules names
:raise ValueError: Invalid path or visitor
"""
# Validate the path
if not path:
raise ValueError("Empty path")
elif not is_string(path):
raise ValueError("Path must be a string")
# Validate the visitor
if visitor is None:
raise ValueError("No visitor method given")
# Use an absolute path
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError("Inexistent path: {0}".format(path))
# Set up the prefix if needed
if prefix is None:
prefix = os.path.basename(path)
bundles = set()
failed = set()
with self.__bundles_lock:
# Use an ImpImporter per iteration because, in Python 3,
# pkgutil.iter_modules() will use a _FileImporter on the second
# walk in a package which will return nothing
for name, is_package in pkgutil.ImpImporter(path).iter_modules():
# Compute the full name of the module
fullname = '.'.join((prefix, name)) if prefix else name
try:
if visitor(fullname, is_package, path):
if is_package:
# Install the package
bundles.add(self.install_bundle(fullname, path))
# Visit the package
sub_path = os.path.join(path, name)
sub_bundles, sub_failed = \
self.install_visiting(sub_path, visitor,
fullname)
bundles.update(sub_bundles)
failed.update(sub_failed)
else:
# Install the bundle
bundles.add(self.install_bundle(fullname, path))
except BundleException as ex:
# Error loading the module
_logger.warning("Error visiting %s: %s", fullname, ex)
# Try the next module
failed.add(fullname)
continue
return bundles, failed
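    # --- Hedged usage sketch (not part of the original class) ----------------
    # A visitor matching the 3-parameter contract documented above could look
    # like the following; the "skip test modules" rule and the search path are
    # purely illustrative assumptions:
    #
    #   def skip_tests_visitor(fullname, is_package, module_path):
    #       return 'test' not in fullname
    #
    #   bundles, failed = framework.install_visiting('/path/to/bundles',
    #                                                skip_tests_visitor)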
def register_service(self, bundle, clazz, service, properties, send_event):
"""
Registers a service and calls the listeners
:param bundle: The bundle registering the service
        :param clazz: Name(s) of the interface(s) implemented by service
        :param service: The service instance
        :param properties: Service properties
        :param send_event: If False, doesn't trigger a service registered event
:return: A ServiceRegistration object
:raise BundleException: An error occurred while registering the service
"""
if bundle is None or service is None or not clazz:
raise BundleException("Invalid registration parameters")
if not isinstance(properties, dict):
# Be sure we have a valid dictionary
properties = {}
else:
# Use a copy of the given properties
properties = properties.copy()
# Prepare the class specification
if not isinstance(clazz, list):
# Make a list from the single class
clazz = [clazz]
# Test the list content
classes = []
for svc_clazz in clazz:
if inspect.isclass(svc_clazz):
# Keep the type name
svc_clazz = svc_clazz.__name__
if not svc_clazz or not is_string(svc_clazz):
# Invalid class name
raise BundleException("Invalid class name: {0}"
.format(svc_clazz))
# Class OK
classes.append(svc_clazz)
# Make the service registration
registration = self._registry.register(bundle, classes, properties,
service)
# Update the bundle registration information
bundle._registered_service(registration)
if send_event:
# Call the listeners
event = ServiceEvent(ServiceEvent.REGISTERED,
registration.get_reference())
self._dispatcher.fire_service_event(event)
return registration
def start(self):
"""
Starts the framework
:return: True if the bundle has been started, False if it was already
running
:raise BundleException: A bundle failed to start
"""
with self._lock:
if self._state in (Bundle.STARTING, Bundle.ACTIVE):
# Already started framework
                return False
# Reset the stop event
self._fw_stop_event.clear()
# Starting...
self._state = Bundle.STARTING
self._dispatcher.fire_bundle_event(
BundleEvent(BundleEvent.STARTING, self))
# Start all registered bundles (use a copy, just in case...)
for bundle in self.__bundles.copy().values():
try:
bundle.start()
except FrameworkException as ex:
# Important error
_logger.exception("Important error starting bundle: %s",
bundle)
if ex.needs_stop:
# Stop the framework (has to be in active state)
self._state = Bundle.ACTIVE
self.stop()
return False
except BundleException:
# A bundle failed to start : just log
_logger.exception("Error starting bundle: %s", bundle)
# Bundle is now active
self._state = Bundle.ACTIVE
return True
def stop(self):
"""
Stops the framework
:return: True if the framework stopped, False it wasn't running
"""
with self._lock:
if self._state != Bundle.ACTIVE:
# Invalid state
return False
# Hide all services (they will be deleted by bundle.stop())
for bundle in self.__bundles.values():
self._registry.hide_bundle_services(bundle)
# Stopping...
self._state = Bundle.STOPPING
self._dispatcher.fire_bundle_event(
BundleEvent(BundleEvent.STOPPING, self))
# Notify listeners that the bundle is stopping
self._dispatcher.fire_framework_stopping()
bid = self.__next_bundle_id - 1
while bid > 0:
bundle = self.__bundles.get(bid)
bid -= 1
if bundle is None or bundle.get_state() != Bundle.ACTIVE:
# Ignore inactive bundle
continue
try:
bundle.stop()
except Exception as ex:
# Just log exceptions
_logger.exception("Error stopping bundle %s: %s",
bundle.get_symbolic_name(), ex)
# Framework is now stopped
self._state = Bundle.RESOLVED
self._dispatcher.fire_bundle_event(
BundleEvent(BundleEvent.STOPPED, self))
# All bundles have been stopped, release "wait_for_stop"
self._fw_stop_event.set()
# Force the registry clean up
self._registry.clear()
return True
def uninstall(self):
"""
A framework can't be uninstalled
:raise BundleException: This method must not be called
"""
raise BundleException("A framework can't be uninstalled")
def uninstall_bundle(self, bundle):
"""
Ends the uninstallation of the given bundle (must be called by Bundle)
:param bundle: The bundle to uninstall
:raise BundleException: Invalid bundle
"""
if bundle is None:
# Do nothing
return
with self.__bundles_lock:
# Stop the bundle first
bundle.stop()
bundle_id = bundle.get_bundle_id()
if bundle_id not in self.__bundles:
raise BundleException("Invalid bundle {0}".format(bundle))
# Notify listeners
self._dispatcher.fire_bundle_event(
BundleEvent(BundleEvent.UNINSTALLED, bundle))
# Remove it from the dictionary
del self.__bundles[bundle_id]
# Remove it from the system => avoid unintended behaviors and
# forces a complete module reload if it is re-installed
name = bundle.get_symbolic_name()
try:
del sys.modules[name]
except KeyError:
# Ignore
pass
try:
# Clear reference in parent
parent, basename = name.rsplit('.', 1)
if parent:
delattr(sys.modules[parent], basename)
except (KeyError, AttributeError, ValueError):
# Ignore errors
pass
def unregister_service(self, registration):
"""
Unregisters the given service
:param registration: A ServiceRegistration to the service to unregister
:raise BundleException: Invalid reference
"""
assert isinstance(registration, ServiceRegistration)
# Get the Service Reference
reference = registration.get_reference()
# Remove the service from the registry
svc_instance = self._registry.unregister(reference)
# Keep a track of the unregistering reference
self.__unregistering_services[reference] = svc_instance
# Call the listeners
event = ServiceEvent(ServiceEvent.UNREGISTERING, reference)
self._dispatcher.fire_service_event(event)
# Update the bundle registration information
bundle = reference.get_bundle()
bundle._unregistered_service(registration)
# Remove the unregistering reference
del self.__unregistering_services[reference]
return True
def _hide_bundle_services(self, bundle):
"""
Hides the services of the given bundle in the service registry
:param bundle: The bundle providing services
:return: The references of the hidden services
"""
assert isinstance(bundle, Bundle)
return self._registry.hide_bundle_services(bundle)
def update(self):
"""
Stops and starts the framework, if the framework is active.
:raise BundleException: Something wrong occurred while stopping or
starting the framework.
"""
with self._lock:
if self._state == Bundle.ACTIVE:
self.stop()
self.start()
def wait_for_stop(self, timeout=None):
"""
        Waits for the framework to stop. Returns immediately if the framework
        bundle is not in the ACTIVE state.
        Uses a threading.Event object internally.
:param timeout: The maximum time to wait (in seconds)
        :return: True if the framework has stopped, False if the timeout was reached
"""
if self._state != Bundle.ACTIVE:
# Inactive framework, ignore the call
return True
self._fw_stop_event.wait(timeout)
with self._lock:
            # If the timeout was reached, the state is still ACTIVE, not RESOLVED
return self._state == Bundle.RESOLVED
# ------------------------------------------------------------------------------
class BundleContext(object):
"""
The bundle context is the link between a bundle and the framework.
It is unique for a bundle and is created by the framework once the bundle
is installed.
"""
def __init__(self, framework, bundle):
"""
Sets up the bundle context
:param framework: Hosting framework
:param bundle: The associated bundle
"""
self.__bundle = bundle
self.__framework = framework
def __str__(self):
"""
String representation
"""
return "BundleContext({0})".format(self.__bundle)
def add_bundle_listener(self, listener):
"""
Registers a bundle listener, which will be notified each time a bundle
is installed, started, stopped or updated.
The listener must be a callable accepting a single parameter:\
* **event** -- The description of the event
(a :class:`~BundleEvent` object).
:param listener: The bundle listener to register
:return: True if the listener has been registered, False if it already
was
"""
return self.__framework._dispatcher.add_bundle_listener(listener)
def add_framework_stop_listener(self, listener):
"""
Registers a listener that will be called back right before the
framework stops
The framework listener must have a method with the following prototype::
def framework_stopping(self):
'''
No parameter given
'''
# ...
:param listener: The framework stop listener
:return: True if the listener has been registered
"""
return self.__framework._dispatcher.add_framework_listener(listener)
def add_service_listener(self, listener, ldap_filter=None,
specification=None):
"""
Registers a service listener
The service listener must have a method with the following prototype::
def service_changed(self, event):
'''
                Called by Pelix when some service properties change
event: A ServiceEvent object
'''
# ...
:param listener: The listener to register
:param ldap_filter: Filter that must match the service properties
(optional, None to accept all services)
:param specification: The specification that must provide the service
(optional, None to accept all services)
:return: True if the listener has been successfully registered
"""
return self.__framework._dispatcher.add_service_listener(
listener, specification, ldap_filter)
def get_all_service_references(self, clazz, ldap_filter=None):
"""
Returns an array of ServiceReference objects.
The returned array of ServiceReference objects contains services that
were registered under the specified class and match the specified
filter expression.
:param clazz: Class implemented by the service
:param ldap_filter: Service filter
:return: The sorted list of all matching service references, or None
"""
return self.__framework.find_service_references(clazz, ldap_filter)
def get_bundle(self, bundle_id=None):
"""
Retrieves the :class:`~pelix.framework.Bundle` object for the bundle
matching the given ID (int). If no ID is given (None), the bundle
associated to this context is returned.
:param bundle_id: A bundle ID (optional)
:return: The requested :class:`~pelix.framework.Bundle` object
:raise BundleException: The given ID doesn't exist or is invalid
"""
if bundle_id is None:
# Current bundle
return self.__bundle
elif type(bundle_id) is Bundle:
# Got a bundle (compatibility with older install_bundle())
bundle_id = bundle_id.get_bundle_id()
return self.__framework.get_bundle_by_id(bundle_id)
def get_bundles(self):
"""
Returns the list of all installed bundles
:return: A list of :class:`~pelix.framework.Bundle` objects
"""
return self.__framework.get_bundles()
def get_property(self, name):
"""
Returns the value of a property of the framework, else returns the OS
environment value.
:param name: A property name
"""
return self.__framework.get_property(name)
def get_service(self, reference):
"""
Returns the service described with the given reference
:param reference: A ServiceReference object
:return: The service object itself
"""
return self.__framework.get_service(self.__bundle, reference)
def get_service_reference(self, clazz, ldap_filter=None):
"""
Returns a ServiceReference object for a service that implements and \
was registered under the specified class
:param clazz: The class name with which the service was registered.
:param ldap_filter: A filter on service properties
:return: A service reference, None if not found
"""
return self.__framework.find_service_references(
clazz, ldap_filter, True)
def get_service_references(self, clazz, ldap_filter=None):
"""
        Returns the service references for services that were registered under
        the specified class by this bundle and that match the given filter
:param clazz: The class name with which the service was registered.
:param ldap_filter: A filter on service properties
:return: The list of references to the services registered by the
calling bundle and matching the filters.
"""
        refs = self.__framework.find_service_references(clazz, ldap_filter)
        if refs:
            # Keep only the references registered by this bundle; build a new
            # list instead of removing entries while iterating over the list
            refs = [ref for ref in refs
                    if ref.get_bundle() is self.__bundle]
        return refs
def install_bundle(self, name, path=None):
"""
Installs the bundle (module) with the given name.
If a path is given, it is inserted in first place in the Python loading
path (``sys.path``). All modules loaded alongside this bundle, *i.e.*
by this bundle or its dependencies, will be looked after in this path
in priority.
.. note::
Before Pelix 0.5.0, this method returned the ID of the installed
bundle, instead of the Bundle object.
.. warning::
            The behavior of the loading process is subject to change, as it
            does not allow multiple frameworks to run safely in the same Python
            interpreter: they might share global module values.
:param name: The name of the bundle to install
:param path: Preferred path to load the module (optional)
:return: The :class:`~pelix.framework.Bundle` object of the installed
bundle
:raise BundleException: Error importing the module or one of its
dependencies
"""
return self.__framework.install_bundle(name, path)
def install_package(self, path, recursive=False):
"""
Installs all the modules found in the given package (directory).
It is a utility method working like
:meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor
accepting every module found.
:param path: Path of the package (folder)
:param recursive: If True, installs the modules found in sub-directories
:return: A 2-tuple, with the list of installed bundles
(:class:`~pelix.framework.Bundle`) and the list of the names
of the modules which import failed.
:raise ValueError: The given path is invalid
"""
return self.__framework.install_package(path, recursive)
def install_visiting(self, path, visitor):
"""
Looks for modules in the given path and installs those accepted by the
given visitor.
The visitor must be a callable accepting 3 parameters:\
* **fullname** -- The full name of the module
* **is_package** -- If True, the module is a package
* **module_path** -- The path to the module file
:param path: Root search path (folder)
:param visitor: The visiting callable
:return: A 2-tuple, with the list of installed bundles
(:class:`~pelix.framework.Bundle`) and the list of the names
of the modules which import failed.
:raise ValueError: Invalid path or visitor
"""
return self.__framework.install_visiting(path, visitor)
def register_service(self, clazz, service, properties, send_event=True):
"""
Registers a service
:param clazz: Class or Classes (list) implemented by this service
:param service: The service instance
:param properties: The services properties (dictionary)
        :param send_event: If False, doesn't trigger a service registered event
:return: A ServiceRegistration object
:raise BundleException: An error occurred while registering the service
"""
return self.__framework.register_service(
self.__bundle, clazz, service, properties, send_event)
def remove_bundle_listener(self, listener):
"""
Unregisters the given bundle listener
:param listener: The bundle listener to remove
:return: True if the listener has been unregistered,
False if it wasn't registered
"""
return self.__framework._dispatcher.remove_bundle_listener(listener)
def remove_framework_stop_listener(self, listener):
"""
Unregisters a framework stop listener
:param listener: The framework stop listener
:return: True if the listener has been unregistered
"""
return self.__framework._dispatcher.remove_framework_listener(listener)
def remove_service_listener(self, listener):
"""
Unregisters a service listener
:param listener: The service listener
:return: True if the listener has been unregistered
"""
return self.__framework._dispatcher.remove_service_listener(listener)
def unget_service(self, reference):
"""
Disables a reference to the service
:return: True if the bundle was using this reference, else False
"""
# Lose the dependency
return self.__framework._registry.unget_service(
self.__bundle, reference)
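# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrative bundle activator built on the BundleContext API above: it
# registers a plain object as a service in start() and unregisters it in
# stop(). The specification name "sample.service", the property key
# "example.rank" and the activator class itself are assumptions made for
# illustration only.
class _ExampleActivator(object):
    """Illustrative activator; never used by the framework itself"""
    def __init__(self):
        self._registration = None
    def start(self, context):
        # Register a service under an illustrative specification name
        self._registration = context.register_service(
            "sample.service", object(), {"example.rank": 1})
    def stop(self, context):
        # Unregister on stop; the framework would clean up leftovers anyway
        if self._registration is not None:
            self._registration.unregister()
            self._registration = None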
# ------------------------------------------------------------------------------
class FrameworkFactory(object):
"""
A framework factory
"""
__singleton = None
""" The framework singleton """
@classmethod
def get_framework(cls, properties=None):
"""
If it doesn't exist yet, creates a framework with the given properties,
else returns the current framework instance.
:return: A Pelix instance
"""
# Normalize sys.path
normalize_path()
if cls.__singleton is None:
cls.__singleton = Framework(properties)
return cls.__singleton
@classmethod
def is_framework_running(cls, framework=None):
"""
Tests if the given framework has been constructed and not deleted.
        If *framework* is None, then the method returns whether at least one
        framework is running.
:param framework: The framework instance to be tested
:return: True if the framework is running
"""
if framework is None:
return cls.__singleton is not None
else:
return cls.__singleton == framework
@classmethod
def delete_framework(cls, framework=None):
"""
Removes the framework singleton
:return: True on success, else False
"""
if framework is None:
framework = cls.__singleton
if framework is cls.__singleton:
# Stop the framework
try:
framework.stop()
except:
_logger.exception("Error stopping the framework")
# Uninstall its bundles
bundles = framework.get_bundles()
for bundle in bundles:
try:
bundle.uninstall()
except:
_logger.exception("Error uninstalling bundle %s",
bundle.get_symbolic_name())
# Clear the event dispatcher
framework._dispatcher.clear()
# Clear the singleton
cls.__singleton = None
return True
return False
# ------------------------------------------------------------------------------
def create_framework(bundles, properties=None,
auto_start=False, wait_for_stop=False, auto_delete=False):
"""
Creates a Pelix framework, installs the given bundles and returns its
instance reference.
    If *auto_start* is True, the framework will be started once all the bundles
    have been installed.
    If *wait_for_stop* is True, the method will return only when the framework
    has stopped. This requires *auto_start* to be True.
If *auto_delete* is True, the framework will be deleted once it has
stopped, and the method will return None.
This requires *wait_for_stop* and *auto_start* to be True.
:param bundles: Bundles to initially install (shouldn't be empty if
*wait_for_stop* is True)
:param properties: Optional framework properties
:param auto_start: If True, the framework will be started immediately
:param wait_for_stop: If True, the method will return only when the
                          framework has stopped
    :param auto_delete: If True, deletes the framework once it has stopped.
:return: The framework instance
:raise ValueError: Only one framework can run at a time
"""
# Test if a framework already exists
if FrameworkFactory.is_framework_running(None):
raise ValueError('A framework is already running')
# Create the framework
framework = FrameworkFactory.get_framework(properties)
# Install bundles
context = framework.get_bundle_context()
for bundle in bundles:
context.install_bundle(bundle)
if auto_start:
# Automatically start the framework
framework.start()
if wait_for_stop:
# Wait for the framework to stop
try:
framework.wait_for_stop(None)
except KeyboardInterrupt:
# Stop keyboard interruptions
if framework.get_state() == Bundle.ACTIVE:
framework.stop()
if auto_delete:
# Delete the framework
FrameworkFactory.delete_framework(framework)
framework = None
return framework
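# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal example of driving create_framework() and FrameworkFactory as defined
# above: start a framework with a couple of bundles, wait for it to stop, then
# delete it. The bundle names below are illustrative assumptions; any importable
# module names would do.
def _example_run_framework():
    """Illustrative only; never called by this module"""
    fw = create_framework(("pelix.ipopo.core", "pelix.shell.core"),
                          properties={"example.property": "value"},
                          auto_start=True)
    try:
        # Block for at most 60 seconds, or until the framework stops
        fw.wait_for_stop(60)
    finally:
        FrameworkFactory.delete_framework(fw)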
def _package_exists(path):
"""
    Checks if the given Python path points to an existing file, or if one of
    its parent directories exists
:param path: A Python path
:return: True if the module or its container exists
"""
name = path
while name:
if os.path.exists(name):
return True
else:
name = os.path.dirname(name)
return False
def normalize_path():
"""
Normalizes sys.path to avoid the use of relative folders
"""
# Normalize Python paths
sys.path = [os.path.abspath(path) for path in sys.path
if os.path.exists(path)]
# Add the "static" current path
sys.path.insert(0, os.getcwd())
# Keep the "dynamic" current folder indicator
sys.path.insert(0, '')
# Normalize paths in loaded modules
for name, module in sys.modules.items():
try:
module.__path__ = [
os.path.abspath(path) for path in module.__path__
if _package_exists(path)]
except AttributeError:
# builtin modules don't have a __path__
pass
except ImportError:
pass
|
ahmadshahwan/ipopo
|
pelix/framework.py
|
Python
|
apache-2.0
| 56,724
|
[
"VisIt"
] |
aa82497d6387fd9e49d8f709a6f5511209978d4a01c8c732d74c938fcbac85a8
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************
**RealND** -
*********************************************
This is the object which represents an N-dimensional vector. It is an extended Real3D;
basically, it has the same functionality but in N dimensions.
It is primarily useful for classes in 'espressopp.analysis'.
Description
...
.. function:: espressopp.__RealND(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toRealNDFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toRealND(\*args)
:param \*args:
:type \*args:
"""
from _espressopp import RealND
from espressopp import esutil
# This injects additional methods into the RealND class and pulls it
# into this module
class __RealND(RealND) :
__metaclass__ = esutil.ExtendBaseClass
'''
__originit = RealND.__init__
def __init__(self, *args):
if len(args) == 0:
x = y = z = 0.0
elif len(args) == 1:
arg0 = args[0]
if isinstance(arg0, RealND):
x = arg0.x
y = arg0.y
z = arg0.z
# test whether the argument is iterable and has 3 elements
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
x, y, z = arg0
elif isinstance(arg0, float) or isinstance(arg0, int):
x = y = z = arg0
else :
raise TypeError("Cannot initialize RealND from %s" % (args))
elif len(args) == 3 :
x, y, z = args
else :
raise TypeError("Cannot initialize RealND from %s" % (args))
return self.__originit(x, y, z)
'''
# string conversion
def __str__(self) :
arr = []
for i in range(self.dimension):
arr.append(self[i])
return str(arr)
def __repr__(self) :
return 'RealND' + str(self)
def toRealNDFromVector(*args):
"""Try to convert the arguments to a RealND.
    This function will only convert to a RealND if the argument is already
    a RealND or an iterable of components."""
arg0 = args[0]
if isinstance(arg0, RealND):
return arg0
elif hasattr(arg0, '__iter__'):
return RealND(*args)
else:
raise TypeError("Something wrong in toRealNDFromVector")
def toRealND(*args):
"""Try to convert the arguments to a RealND, returns the argument,
if it is already a RealND."""
if len(args) == 1 and isinstance(args[0], RealND):
return args[0]
else:
return RealND(*args)
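# --- Hedged usage sketch (not part of the original module) -------------------
# Assuming the underlying _espressopp.RealND binding accepts an iterable of
# floats (as the commented-out __init__ above suggests), the helpers would be
# used roughly like this:
#
#   v = toRealND([1.0, 2.0, 3.0, 4.0])   # build a 4-dimensional vector
#   w = toRealND(v)                       # already a RealND: returned as-is
#   print(v)                              # __str__ prints the component list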
|
capoe/espressopp.soap
|
src/RealND.py
|
Python
|
gpl-3.0
| 3,386
|
[
"ESPResSo"
] |
675c29809b1f0d3296b91c56db1938bb374e4ca5cbac2369838bbd6035c073a1
|
# Script to parse ECP files into XML format for use by libecpint
# Usage: python parseecp.py [NAME]
# will convert raw/NAME.ecp into xml/NAME.xml
# which can then be found by libecpint using reference "NAME"
from lxml import etree
# letter symbols for angular momentum shells
# can be added to if needed
angular_shells = ["s", "p", "d", "f", "g", "h", "i"]
class Shell:
""" Container for a shell of an ECP
i.e. the Gaussian expansion of a fixed angular momentum
Members:
    lval - angular momentum quantum number of shell
powers - the power of r for each Gaussian in the shell
nexp - the number of primitive Gaussians (exponents) in the shell
exps - the exponents of the Gaussians
contr - the contraction coefficients of the Gaussians
"""
def __init__(self, lval = 0, nexp = 0):
"""Creates a Shell with angular momentum lval and nexp exponents"""
self.lval = lval
self.powers = []
self.nexp = nexp
self.exps = []
self.contr = []
class Atom:
""" Container for an ECP for a specific atom type
Members:
name - name of the atom, e.g. 'O' for oxygen
ncore - the number of core electrons in the ECP
maxl - the maximum angular momentum shell in the ECP
nshells - the number of shells in the ECP
shells - an array of Shell objects describing the shells in the ECP
"""
def __init__(self, name="X", nshells = 0):
"""Creates a blank Atom with the given name"""
self.name = name
self.ncore = 0
self.maxl = 0
self.nshells = nshells
self.shells = []
def tokenize(line, sep=','):
"""Given a line of input, cleans up and returns tokens,
split by the separator (sep)
"""
# strip out whitespace and split by separator
line = line.strip()
tokens = line.split(sep)
    # get rid of additional whitespace inside each token
    # (reassigning the loop variable would not modify the list in place)
    tokens = [token.replace(' ', '') for token in tokens]
    return tokens
def parse_ecp(file):
"""Given a MOLPRO-format ECP file, returns Atom objects (ECPs) for every
atom type defined in that file.
"""
atoms = []
atomnames = {}
# read in the file
lines = file.readlines()
nlines = len(lines)
# loop to the end of the file
linenumber = 0
atomnumber = 0
while linenumber < nlines:
tokens = tokenize(lines[linenumber], sep=',')
        # all lines where an ECP definition starts have at least three
        # comma-separated fields, beginning "ecp,AtomName,..."
if (len(tokens) > 2):
if (tokens[0].lower() == "ecp"):
# Found an ECP definition
atom_name = tokens[1].lower()
# Create a container for the ECP
new_atom = Atom(name=atom_name)
# the next two tokens should be the no. of core electrons
# and the maximum angular momentum shell of the ECP
new_atom.ncore = int(tokens[2])
new_atom.maxl = int(tokens[3])
                # there should then be maxl+1 lines defining the shells
# in the order [maxL, 0, 1, ..., maxL-1]
for i in range(new_atom.maxl+1):
linenumber += 1
tokens = tokenize(lines[linenumber], sep=';')
# could be blank lines or comments, so check first
if(len(tokens) > 1):
# shell definition has form "nx; n,x,c; n,x,c; ..."
# defining nx Gaussians of the form c * r^n * exp(-x*r^2)
nprims = int(tokens[0])
l = i-1
if (i==0):
l = new_atom.maxl
# create a container for the shell
new_shell = Shell(lval=l, nexp=nprims)
# fill in the details of the shell as described above
for token in tokens[1:]:
subtokens = token.split(',')
if (len(subtokens) == 3):
new_shell.powers.append(subtokens[0])
new_shell.exps.append(subtokens[1])
new_shell.contr.append(subtokens[2])
# append the new shell to the Atom
new_atom.shells.append(new_shell)
# end of Atom definition, append
atoms.append(new_atom)
linenumber += 1
# Return all the atoms found
return atoms
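# --- Hedged format sketch (not part of the original script) ------------------
# From the parsing logic above, a minimal MOLPRO-style block that parse_ecp()
# accepts looks roughly like this (element, electron counts and Gaussian
# parameters are made up for illustration):
#
#   ECP, Na, 10, 2
#   1; 2, 1.32, 14.25;
#   2; 2, 0.55, 3.20; 2, 1.10, 5.50;
#   1; 2, 0.85, 2.10;
#
# i.e. "ecp,<atom>,<ncore>,<maxL>" followed by maxL+1 shell lines of the form
# "nexp; n,x,c; n,x,c; ...", with the local (maxL) shell listed first.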
def write_ecp_basis(atoms, name):
"""Given a list of Atom objects defining ECPs, and a name for the ECP basis,
this writes the basis to XML file.
"""
filename = "xml/" + name + ".xml"
# write into a binary xml file using LXML package
with open(filename, 'wb') as new_file:
# format is Root -> Atom1 --> Shell1
# Shell2 ... etc
# Atom2 --> etc.
root = etree.Element("root", name=name)
tree = etree.ElementTree(root)
for atom in atoms:
child = etree.SubElement(root, atom.name, ncore = str(atom.ncore), maxl=str(atom.maxl))
for shell in atom.shells:
schild = etree.SubElement(child, "Shell", lval=str(shell.lval), nexp=str(shell.nexp))
for i in range(shell.nexp):
try:
xchild = etree.SubElement(schild, "nxc", n=shell.powers[i], x=shell.exps[i], c=shell.contr[i])
                    except Exception:
                        # something is wrong with the definition of this shell
                        print("ERROR in " + filename + " (" + name + ") : atom " + atom.name + ", shell type " + str(shell.lval))
print("Expected no. of powers/exps/coeffs:", shell.nexp)
print("Actual no. of powers: ", len(shell.powers))
print("Actual no. of exps: ", len(shell.exps))
print("Actual no. of coeffs: ", len(shell.contr))
tree.write(new_file, pretty_print = True)
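# --- Hedged output sketch (not part of the original script) ------------------
# Given the element construction above, the emitted XML has roughly this shape
# (attribute values taken from the illustrative ECP sketch further up):
#
#   <root name="NAME">
#     <na ncore="10" maxl="2">
#       <Shell lval="2" nexp="1">
#         <nxc n="2" x="1.32" c="14.25"/>
#       </Shell>
#       ...
#     </na>
#   </root>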
if __name__ == "__main__":
# Given a raw MOLPRO ECP file in raw/name.ecp, writes xml/name.xml
import sys
name = sys.argv[1]
input_file = open('raw/' + name + '.ecp', 'r')
atoms = parse_ecp(input_file)
write_ecp_basis(atoms, name)
|
robashaw/libecpint
|
share/libecpint/parseecp.py
|
Python
|
mit
| 6,428
|
[
"Gaussian",
"Molpro"
] |
e7c8ef5f834efbc0dc7a0fa2e5b79b20303a69cb379ecac0565297362b957224
|
#!/usr/bin/env python
"""Display mean length of sequences in a fasta file.
Usage:
%program file.fasta"""
import sys
try:
from Bio import SeqIO
except:
print "This program requires the Biopython library"
sys.exit(0)
try:
handle = open(sys.argv[1], 'rU')
lengths = map(lambda seq: len(seq.seq), SeqIO.parse(handle, 'fasta'))
print reduce(lambda x,y: x+y, lengths)/float(len(lengths))
except:
print __doc__
|
enormandeau/Scripts
|
fasta_mean_len.py
|
Python
|
gpl-3.0
| 440
|
[
"Biopython"
] |
0080d15e4e4dd9e199e5c44e876c01340bf395bac40fda5d16063cd826fab26d
|
import click
from .pottemps import compute_pottemps
@click.group()
def generators():
return
@generators.command()
@click.argument('dest_dir')
@click.option('--num_procs', type=int,
default=1,
help="Number of processes to use. Default: 1.")
@click.option('--test', is_flag=True,
help="Simple testing. Create a much less precise table.")
@click.option('--approx', is_flag=True,
help="Use approximate equivalent potential temperature.")
def pottemps(dest_dir, num_procs, test, approx):
"""
This script computes two data files:
DEST_DIR/massfraction_air_pottempequi_g_ref_common_pressures.dat
DEST_DIR/massfraction_air_pottemp_g_ref_common_pressures.dat
or
DEST_DIR/massfraction_air_pottempequiapprox_g_ref_common_pressures.dat
DEST_DIR/massfraction_air_pottemp_g_ref_common_pressures.dat
if option '--approx' is specified.
These data files can be used with the pyteos_nc compute to compute moist air
equivalent potential temperature and dry (virtual) potential temperature,
respectively, from CF-compliants netcdf files:
For example,
$ pyteos_nc DEST_DIR/massfraction_air_pottempequi_g_ref_common_pressures.dat in.nc out.nc
would compute pottempequi, the equivalent potential temperature, from file in.nc and
store the result in out.nc.
In this example, in.nc should contain variables ta (air temperature),
    hus (specific humidity) and pa (air pressure).
"""
compute_pottemps(dest_dir, num_procs, test, approx)
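# --- Hedged usage sketch (not part of the original module) -------------------
# With the click group above wired up as a console script, the table generation
# step would be invoked roughly like this; the executable name is an assumption
# based only on the decorators above, while the argument and the options
# (--num_procs, --approx) come from the declarations in this file:
#
#   pyteos_nc_generators pottemps /path/to/tables --num_procs 4 --approx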
|
laliberte/pyteos_nc
|
pyteos_nc/generators/__init__.py
|
Python
|
gpl-3.0
| 1,575
|
[
"NetCDF"
] |
ccf0121a7ebc0f3a6ff46a5f29aeb52f01d1b0e6c2c7346537dd35ed747be1a4
|
"""Host configuration parameters.
Modify these paths to suit your installation.
"""
# Main modelsim executable, full absolute path.
MODELSIM_EXEC_PATH = "D:/dev/FPGA/Modelsim-ASE/modelsim_ase/win32aloem/vsim.exe"
# Software simulator executable, relative path (with extension if in win32).
SWSIM_EXEC_PATH = "../../tools/ion32sim/bin/ion32sim.exe"
|
jaruiz/ION
|
tools/runtest/config.py
|
Python
|
lgpl-3.0
| 352
|
[
"ASE"
] |
61363a5c91fe77bdece60d714645062c73292b784072c3d81f6f1a09b2b61009
|
__author__ = 'sibirrer'
from astrofunc.LensingProfiles.nfw import NFW
from astrofunc.LensingProfiles.nfw_ellipse import NFW_ELLIPSE
import numpy as np
import numpy.testing as npt
import pytest
class TestNFW(object):
"""
    tests the NFW profile methods
"""
def setup(self):
self.nfw = NFW()
def test_function(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5)
x = np.array([0])
y = np.array([0])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 0, decimal=4)
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5)
npt.assert_almost_equal(values[1], 3.5400250357511416, decimal=5)
npt.assert_almost_equal(values[2], 4.5623722261790647, decimal=5)
def test_derivatives(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
f_x, f_y = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_x[0], 0.53211690764331998, decimal=5)
npt.assert_almost_equal(f_y[0], 1.06423381528664, decimal=5)
x = np.array([0])
y = np.array([0])
theta_Rs = 0
f_x, f_y = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_x[0], 0, decimal=5)
npt.assert_almost_equal(f_y[0], 0, decimal=5)
x = np.array([1,3,4])
y = np.array([2,1,1])
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0][0], 0.53211690764331998, decimal=5)
npt.assert_almost_equal(values[1][0], 1.06423381528664, decimal=5)
npt.assert_almost_equal(values[0][1], 1.0493927480837946, decimal=5)
npt.assert_almost_equal(values[1][1], 0.34979758269459821, decimal=5)
def test_hessian(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
f_xx, f_yy,f_xy = self.nfw.hessian(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_xx[0], 0.40855527280658294, decimal=5)
npt.assert_almost_equal(f_yy[0], 0.037870368296371637, decimal=5)
npt.assert_almost_equal(f_xy[0], -0.2471232696734742, decimal=5)
x = np.array([1,3,4])
y = np.array([2,1,1])
values = self.nfw.hessian(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0][0], 0.40855527280658294, decimal=5)
npt.assert_almost_equal(values[1][0], 0.037870368296371637, decimal=5)
npt.assert_almost_equal(values[2][0], -0.2471232696734742, decimal=5)
npt.assert_almost_equal(values[0][1], -0.046377502475445781, decimal=5)
npt.assert_almost_equal(values[1][1], 0.30577812878681554, decimal=5)
npt.assert_almost_equal(values[2][1], -0.13205836172334798, decimal=5)
class TestMassAngleConversion(object):
"""
test angular to mass unit conversions
"""
def setup(self):
self.nfw = NFW()
self.nfw_ellipse = NFW_ELLIPSE()
def test_angle(self):
x, y = 1, 0
alpha1, alpha2 = self.nfw.derivatives(x, y, theta_Rs=1., Rs=1.)
assert alpha1 == 1.
def test_convertAngle2rho(self):
rho0 = self.nfw._alpha2rho0(theta_Rs=1., Rs=1.)
assert rho0 == 0.81472283831773229
def test_convertrho02angle(self):
theta_Rs_in = 1.5
Rs = 1.5
rho0 = self.nfw._alpha2rho0(theta_Rs=theta_Rs_in, Rs=Rs)
theta_Rs_out = self.nfw._rho02alpha(rho0, Rs)
assert theta_Rs_in == theta_Rs_out
if __name__ == '__main__':
pytest.main()
|
sibirrer/astrofunc
|
test/test_nfw.py
|
Python
|
mit
| 4,093
|
[
"Gaussian"
] |
373cf8764814b8e48f3c7cab4af65a5d8ff6b94fdd560a1b43b4991b9e6e2ebb
|
from director import vtkAll as vtk
from director import vtkNumpy as vnp
from director.shallowCopy import shallowCopy
import numpy as np
def encodePolyData(polyData):
'''Given a vtkPolyData, returns a numpy int8 array that contains
the serialization of the data. This array can be passed to the
decodePolyData function to construct a new vtkPolyData object from
the serialized data.'''
if not hasattr(vtk, 'vtkCommunicator'):
w = vtk.vtkPolyDataWriter()
w.WriteToOutputStringOn()
w.SetInput(polyData)
w.Write()
return np.frombuffer(w.GetOutputStdString(), dtype=np.int8)
charArray = vtk.vtkCharArray()
vtk.vtkCommunicator.MarshalDataObject(polyData, charArray)
numpyArray = vnp.numpy_support.vtk_to_numpy(charArray)
assert numpyArray.dtype == np.int8
return numpyArray
def decodePolyData(data):
'''Given a numpy int8 array, deserializes the data to construct a new
vtkPolyData object and returns the result.'''
if not hasattr(vtk, 'vtkCommunicator'):
r = vtk.vtkPolyDataReader()
r.ReadFromInputStringOn()
r.SetInputString(str(data.data))
r.Update()
return shallowCopy(r.GetOutput())
charArray = vnp.getVtkFromNumpy(data)
assert isinstance(charArray, vtk.vtkCharArray)
polyData = vtk.vtkPolyData()
vtk.vtkCommunicator.UnMarshalDataObject(charArray, polyData)
return polyData
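# --- Hedged usage sketch (not part of the original module) -------------------
# Round-trip example for the two helpers above: serialize a sphere's polydata
# into a numpy buffer and rebuild it. Using vtkSphereSource through the
# director.vtkAll wrapper is an assumption made for illustration.
def _exampleRoundTrip():
    '''Illustrative only; not called by this module'''
    source = vtk.vtkSphereSource()
    source.Update()
    encoded = encodePolyData(source.GetOutput())
    restored = decodePolyData(encoded)
    # The decoded copy should contain the same number of points
    return restored.GetNumberOfPoints() == source.GetOutput().GetNumberOfPoints()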
|
RobotLocomotion/director
|
src/python/director/geometryencoder.py
|
Python
|
bsd-3-clause
| 1,432
|
[
"VTK"
] |
991dc6649f0b38fc27276ec38fa19186cb4394603dc3c4e50d1fbd43649e1f47
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Django
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
# app
from apps.books.views import BookView
urlpatterns = [
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
# api
url(r'^api/book/', BookView.as_view()),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
# url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
TraMZzz/test
|
server/project/urls.py
|
Python
|
bsd-3-clause
| 1,132
|
[
"VisIt"
] |
e434a189f1332fd48ecd523ee768496e409ca5252fdeaefe29ab751443142117
|
r"""
Sequences (:mod:`skbio.sequence`)
=================================
.. currentmodule:: skbio.sequence
This module provides classes for storing and working with biological sequences,
including generic sequences which have no restrictions on which characters can
be included, and sequences based on IUPAC-defined sets of allowed characters
(including degenerate characters), including ``DNA``, ``RNA`` and ``Protein``
sequences. Common operations are defined as methods, for example computing the
reverse complement of a DNA sequence, or searching for N-glycosylation motifs
in ``Protein`` sequences. Class attributes are available to obtain valid
character sets, complement maps for different sequence types, and for obtaining
degenerate character definitions. Additionally this module defines the
``GeneticCode`` class, which represents an immutable object that translates DNA
or RNA sequences into protein sequences.
The primary information stored for each different type of sequence object is
the underlying sequence data itself. This is stored as an immutable Numpy
array. Additionally, each type of sequence may include optional metadata
and positional metadata. Note that metadata and positional metadata are
mutable.
Classes
-------
.. autosummary::
:toctree: generated/
Sequence
DNA
RNA
Protein
GeneticCode
Examples
--------
>>> from skbio import DNA, RNA
New sequences are created with optional metadata and positional metadata
fields. Metadata is stored as a Python dict, while positional metadata
becomes a Pandas DataFrame.
>>> d = DNA('ACCGGGTA')
>>> d = DNA('ACCGGGTA', metadata={'id':"my-sequence", 'description':"GFP"},
... positional_metadata={'quality':[22, 25, 22, 18, 23, 25, 25, 25]})
>>> d = DNA('ACCGGTA', metadata={'id':"my-sequence"})
New sequences can also be created from existing sequences, for example as their
reverse complement or degapped (i.e., unaligned) version.
>>> d1 = DNA('.ACC--GGG-TA...', metadata={'id':'my-sequence'})
>>> d2 = d1.degap()
>>> d2
DNA
-----------------------------
Metadata:
'id': 'my-sequence'
Stats:
length: 8
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 62.50%
-----------------------------
0 ACCGGGTA
>>> d3 = d2.reverse_complement()
>>> d3
DNA
-----------------------------
Metadata:
'id': 'my-sequence'
Stats:
length: 8
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 62.50%
-----------------------------
0 TACCCGGT
It's also straightforward to compute distances between sequences (optionally
using user-defined distance metrics, the default is Hamming distance which
requires that the sequences being compared are the same length) for use in
sequence clustering, phylogenetic reconstruction, etc.
>>> r1 = RNA('GACCCGCUUU')
>>> r2 = RNA('GCCCCCCUUU')
>>> r1.distance(r2)
0.2
Similarly, you can calculate the percent (dis)similarity between a pair of
aligned sequences.
>>> r3 = RNA('ACCGUUAGUC')
>>> r4 = RNA('ACGGGU--UC')
>>> r3.match_frequency(r4, relative=True)
0.6
>>> r3.mismatch_frequency(r4, relative=True)
0.4
Sequences can be searched for known motif types. This returns the slices that
describe the matches.
>>> r5 = RNA('AGG-GGACUGAA')
>>> for motif in r5.find_motifs('purine-run', min_length=2):
... motif
slice(0, 3, None)
slice(4, 7, None)
slice(9, 12, None)
Those slices can be used to extract the relevant subsequences.
>>> for motif in r5.find_motifs('purine-run', min_length=2):
... r5[motif]
... print('')
RNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 66.67%
-----------------------------
0 AGG
<BLANKLINE>
RNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 66.67%
-----------------------------
0 GGA
<BLANKLINE>
RNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 33.33%
-----------------------------
0 GAA
<BLANKLINE>
And gaps or other features can be ignored while searching, as these may disrupt
otherwise meaningful motifs.
>>> for motif in r5.find_motifs('purine-run', min_length=2, ignore=r5.gaps()):
... r5[motif]
... print('')
RNA
-----------------------------
Stats:
length: 7
has gaps: True
has degenerates: False
has non-degenerates: True
GC-content: 66.67%
-----------------------------
0 AGG-GGA
<BLANKLINE>
RNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 33.33%
-----------------------------
0 GAA
<BLANKLINE>
In the above example, removing gaps from the resulting motif matches is easily
achieved, as the sliced matches themselves are sequences of the same type as
the input.
>>> for motif in r5.find_motifs('purine-run', min_length=2, ignore=r5.gaps()):
... r5[motif].degap()
... print('')
RNA
-----------------------------
Stats:
length: 6
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 66.67%
-----------------------------
0 AGGGGA
<BLANKLINE>
RNA
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 33.33%
-----------------------------
0 GAA
<BLANKLINE>
Sequences can similarly be searched for arbitrary patterns using regular
expressions.
>>> for match in r5.find_with_regex('(G+AC[UT])'):
... match
slice(4, 9, None)
DNA can be transcribed to RNA:
>>> dna = DNA('ATGTGTATTTGA')
>>> rna = dna.transcribe()
>>> rna
RNA
-----------------------------
Stats:
length: 12
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 25.00%
-----------------------------
0 AUGUGUAUUU GA
Both DNA and RNA can be translated into a protein sequence. For example, let's
translate our DNA and RNA sequences using NCBI's standard genetic code (table
ID 1, the default genetic code in scikit-bio):
>>> protein_from_dna = dna.translate()
>>> protein_from_dna
Protein
-----------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
has stops: True
-----------------------------
0 MCI*
>>> protein_from_rna = rna.translate()
>>> protein_from_rna
Protein
-----------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
has stops: True
-----------------------------
0 MCI*
The two translations are equivalent:
>>> protein_from_dna == protein_from_rna
True
Class-level methods contain information about the molecule types.
>>> DNA.degenerate_map['B']
set(['C', 'T', 'G'])
>>> RNA.degenerate_map['B']
set(['C', 'U', 'G'])
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._sequence import Sequence
from ._protein import Protein
from ._dna import DNA
from ._rna import RNA
from ._genetic_code import GeneticCode
__all__ = ['Sequence', 'Protein', 'DNA', 'RNA', 'GeneticCode']
test = TestRunner(__file__).test
|
demis001/scikit-bio
|
skbio/sequence/__init__.py
|
Python
|
bsd-3-clause
| 7,644
|
[
"scikit-bio"
] |
b161b5d5b352534a88e7d29501a29336284882f5923b0fb657e770cb100cf3c6
|
#!/usr/bin/env python
#--------------------------------------------------------------------
# load module
#
import time
start_time = time.time()
import sys
import os
import numpy as np
import scipy.io.netcdf as nio
from netCDF4 import Dataset as NCDataset
# from Scientific.IO.NetCDF import NetCDFFile as NCDataset # No Scientific on Hexagon
print("--- %s s is used for loading libs! ---" % (time.time() - start_time))
#time.sleep(1.1)
#--------------------------------------------------------------------
# define parameters
#
if len(sys.argv) < 4:
    # the script expects the data file name, the target date and the target seconds
    print("The data file name, target date and target seconds are necessary!!!")
    sys.exit()
DataFilename = sys.argv[1]
#orgdate = float(sys.argv[2])
trgdate = float(sys.argv[2])
trgsecs = float(sys.argv[3])
#--------------------------------------------------------------------
tData = NCDataset(DataFilename, 'r+')
print("--- %s s is used for loading libs! ---" % (time.time() - start_time))
date = tData.variables['date'][:].copy()
#date = date - (orgdate-trgdate)*100
#date = trgdate
#secs = trgsecs
#print date
#sys.exit()
tData.variables['date'][:] = trgdate
tData.variables['datesec'][:] = trgsecs
#print tData.variables['date'][:]
tData.sync()
tData.close()
#--------------------------------------------------------------------
print("--- %s s is used for loading libs! ---" % (time.time() - start_time))
|
TheEarnest/Spectral_nudging
|
models/CAM4/func_change_CAM_yymmddss.py
|
Python
|
gpl-3.0
| 1,345
|
[
"NetCDF"
] |
69f810b88b95c35e55f4d877264c6d26e3fb115582b1f3fc7a00997a94128a20
|
#!/usr/bin/env python3
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
import os
import sys
import requests
if not os.environ['CI_COMMIT_REF_NAME'].startswith('PR-'):
exit(0)
PR = os.environ['CI_COMMIT_REF_NAME'][3:]
URL = 'https://api.github.com/repos/espressomd/espresso/issues/' + \
PR + '/comments'
HEADERS = {'Authorization': 'token ' + os.environ['GITHUB_TOKEN']}
SIZELIMIT = 5000
doc_type, has_warnings, filepath_warnings = sys.argv[-3:]
has_warnings = has_warnings != '0'
prefix = {'sphinx': 'doc', 'doxygen': 'dox'}[doc_type]
TOKEN_ESPRESSO_CI = prefix + '_warnings.sh'
# Delete all existing comments
comments = requests.get(URL, headers=HEADERS)
comments.raise_for_status()
for comment in comments.json():
if comment['user']['login'] == 'espresso-ci' and \
TOKEN_ESPRESSO_CI in comment['body']:
response = requests.delete(comment['url'], headers=HEADERS)
response.raise_for_status()
# If documentation raised warnings, post a new comment
if has_warnings:
with open(filepath_warnings) as f:
warnings = f.read().strip()
warnings = warnings.replace('@', '\\')
assert warnings.count('\n') >= 1, 'list of warnings is missing newlines'
# the logfile must be guarded by backticks
backticks = max(['``'] + re.findall('`+', warnings), key=len) + '`'
assert len(backticks) < 12, 'cannot guard logfile warnings with backticks'
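    # Illustration (not from the original sources): if the warnings text itself
    # contains a ``` run, `backticks` becomes ````, so the fenced block posted to
    # GitHub is always one backtick longer than any run inside it and stays intact.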
# format message
summary, warnings = warnings.split('\n', 1)
comment = 'Your pull request does not meet our code documentation rules. '
comment += summary + '\n\n' + backticks + '\n'
if len(warnings) > SIZELIMIT:
for line in warnings.split('\n'):
if len(comment) + len(line) > SIZELIMIT - 200:
break
comment += line + '\n'
comment = comment.rstrip() + '\n' + backticks + '\n'
comment += (
'\nThis list was truncated, check the [container logfile]'
'({}) for the complete list.\n'.format(os.environ['CI_JOB_URL']))
else:
comment += warnings.rstrip() + '\n' + backticks + '\n'
comment += (
'\nYou can generate these warnings with `make -t; make {}; '
'../maintainer/CI/{}_warnings.sh` using the maxset config. This is '
'the same command that I have executed to generate the log above.'
.format(doc_type, prefix))
assert TOKEN_ESPRESSO_CI in comment
response = requests.post(URL, headers=HEADERS, json={'body': comment})
response.raise_for_status()
|
KaiSzuttor/espresso
|
maintainer/gh_post_docs_warnings.py
|
Python
|
gpl-3.0
| 3,198
|
[
"ESPResSo"
] |
7e237716066761a9a6a8a22a7b9f1b19b926bf341b2c4aed5c8a0b7cea1f2ea5
|
# objectedit.py ---
#
# Filename: objectedit.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Jun 30 11:18:34 2010 (+0530)
# Version:
# Last-Updated: Fri Feb 01 11:05:59 2017 (+0530)
# By: Harsha
# Update #:
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# This code is for a widget to edit MOOSE objects. We can now track if
# a field is a Value field and make it editable accordingly. There
# seems to be no clean way of determining whether the field is worth
# plotting (without knowledge of the model/biology there is no way
# we can tell this). But we can of course check if the field is a
# numeric one.
#
#
# Change log:
#
# Wed Jun 30 11:18:34 2010 (+0530) - Originally created by Subhasis
# Ray, the model and the view
#
# Modified/adapted to dh_branch by Chaitanya/Harsharani
#
# Thu Apr 18 18:37:31 IST 2013 - Reintroduced into multiscale GUI by
# Subhasis
#
# Fri Apr 19 15:05:53 IST 2013 - Subhasis added undo redo
# feature. Create ObjectEditModel as part of ObjectEditView.
# Tue Mar 7 16:10:54 IST 2017 - Harsha: Pool or BufPool can now be interchanged
#                               by setting/unsetting the isBuffered field
# Fri May 17 23:45:59 2017 (+0530) - Harsha added a notes header;
# Kd is calculated for second-order reactions and the value is displayed
# Tue Jun 18 12:10:54 IST 2018 - Harsha: the group boundary color can now be edited from the object editor
# Mon Sep 10 16:21:00 IST 2018 - When name is edited, the editorTitle gets updated
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import PyQt4
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtGui import QTextEdit
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QGridLayout
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtCore import QMargins
from PyQt4.QtGui import QSplitter
import sys
from collections import deque
import traceback
#sys.path.append('../python')
import moose
import defaults
import config
#from plugins.kkitUtil import getColor
from moose.chemUtil.chemConnectUtil import getColor
#these fields will be ignored
extra_fields = ['this',
'me',
'parent',
'path',
'children',
'linearSize',
'objectDimensions',
'lastDimension',
'localNumField',
'pathIndices',
'msgOut',
'msgIn',
'diffConst',
'speciesId',
'Coordinates',
'neighbors',
'DiffusionArea',
'DiffusionScaling',
'x',
'x0',
'x1',
'dx',
'nx',
'y',
'y0',
'y1',
'dy',
'ny',
'z',
'z0',
'z1',
'dz',
'nz',
'coords',
'isToroid',
'preserveNumEntries',
# 'numKm',
'numSubstrates',
'concK1',
'meshToSpace',
'spaceToMesh',
'surface',
'method',
'alwaysDiffuse',
'numData',
'numField',
'valueFields',
'sourceFields',
'motorConst',
'destFields',
'dt',
'tick',
'idValue',
'index',
'fieldIndex'
]
class ObjectEditModel(QtCore.QAbstractTableModel):
"""Model class for editing MOOSE elements. This is not to be used
directly, except that its undo and redo slots should be connected
to by the GUI actions for the same.
SIGNALS:
objectNameChanged(PyQt_PyObject): when a moose object's name is
changed, this signal is emitted with the object as argument. This
can be captured by widgets that display the object name.
dataChanged: emitted when any data is changed in the moose object
"""
objectNameChanged = QtCore.pyqtSignal('PyQt_PyObject')
# dataChanged = QtCore.pyqtSignal('PyQt_PyObject')
def __init__(self, datain, headerdata=['Field','Value'], undolen=100, parent=None, *args):
QtCore.QAbstractTableModel.__init__(self, parent, *args)
self.fieldFlags = {}
self.fields = []
self.mooseObject = datain
self.headerdata = headerdata
self.undoStack = deque(maxlen=undolen)
self.redoStack = deque(maxlen=undolen)
self.checkState_ = False
for fieldName in self.mooseObject.getFieldNames('valueFinfo'):
if fieldName in extra_fields :
continue
value = self.mooseObject.getField(fieldName)
self.fields.append(fieldName)
#harsha: For signalling models will be pulling out notes field from Annotator
# can updates if exist for other types also
if (isinstance (self.mooseObject,moose.ChemCompt) or \
isinstance(self.mooseObject,moose.ReacBase) or \
isinstance(moose.element(moose.element(self.mooseObject).parent),moose.EnzBase) \
):
pass
else:
self.fields.append("Color")
flag = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable
self.fieldFlags[fieldName] = flag
if ( isinstance(self.mooseObject, moose.ReacBase) ) :
self.fields.append("Kd")
flag = QtCore.Qt.ItemIsEnabled
self.fieldFlags[fieldName] = flag
def rowCount(self, parent):
return len(self.fields)
def columnCount(self, parent):
return len(self.headerdata)
def setData(self, index, value, role=QtCore.Qt.EditRole):
if not index.isValid() or index.row () >= len(self.fields) or index.column() != 1:
return False
field = self.fields[index.row()]
if (role == QtCore.Qt.CheckStateRole):
if (index.column() == 1):
self.checkState_ = value
return True
else:
value = str(value.toString()).strip() # convert Qt datastructure to Python string
if len(value) == 0:
return False
if field == "Notes":
field = "notes"
ann = moose.Annotator(self.mooseObject.path+'/info')
oldValue = ann.getField(field)
value = type(oldValue)(value)
ann.setField(field,value)
self.undoStack.append((index,oldValue))
elif field == "vector":
for ch in ['[',']']:
if ch in value:
value = value.replace(ch," ")
value = value.replace(",", " ")
valuelist = []
if value.find(',') != -1:
valuelist = value.split(",")
elif value.find(' ') != -1:
valuelist = value.split(" ")
else:
valuelist = value
vectorlist = []
for d in valuelist:
try:
float(d)
vectorlist.append(float(d))
except:
pass
from numpy import array
a = array( vectorlist )
self.mooseObject.setField(field, a)
else:
oldValue = self.mooseObject.getField(field)
if field != "isBuffered":
value = type(oldValue)(value)
self.mooseObject.setField(field, value)
else:
if self.mooseObject.className == "ZombiePool" or self.mooseObject.className =="ZombieBufPool":
QtGui.QMessageBox.warning(None,'Solver is set, Could not set the value','\n Unset the solver by clicking \n run widget -> Preferences -> Exponential Euler->Apply')
else:
if value.lower() in ("yes", "true", "t", "1"):
self.mooseObject.setField(field, True)
else:
self.mooseObject.setField(field, False)
self.undoStack.append((index, oldValue))
if field == 'name':
self.emit(QtCore.SIGNAL('objectNameChanged(PyQt_PyObject)'), self.mooseObject)
return True
self.dataChanged.emit(index, index)
return True
def undo(self):
print ('Undo')
if len(self.undoStack) == 0:
            raise RuntimeError('No more undo information')
index, oldvalue, = self.undoStack.pop()
field = self.fields[index.row()]
currentvalue = self.mooseObject.getField(field)
oldvalue = type(currentvalue)(oldvalue)
self.redoStack.append((index, str(currentvalue)))
self.mooseObject.setField(field, oldvalue)
if field == 'name':
self.objectNameChanged.emit(self.mooseObject)
self.emit(QtCore.SIGNAL('dataChanged(const QModelIndex&, const QModelIndex&)'), index, index)
    def redo(self):
        if len(self.redoStack) == 0:
            raise RuntimeError('No more redo information')
        index, oldvalue, = self.redoStack.pop()
        field = self.fields[index.row()]
        currentvalue = self.mooseObject.getField(field)
        self.undoStack.append((index, str(currentvalue)))
        self.mooseObject.setField(field, type(currentvalue)(oldvalue))
        if field == 'name':
            self.emit(QtCore.SIGNAL('objectNameChanged(PyQt_PyObject)'), self.mooseObject)
        self.emit(QtCore.SIGNAL('dataChanged(const QModelIndex&, const QModelIndex&)'), index, index)
def flags(self, index):
flag = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
#flag = QtCore.Qt.NoItemFlags
if not index.isValid():
return None
# Replacing the `outrageous` up stuff with something sensible
field = self.fields[index.row()]
newstr = field[0]
newstr = newstr.upper()
field_string = newstr + field[1:]
setter = 'set%s' %(field_string)
#setter = 'set_%s' % (self.fields[index.row()])
#print " from Object setter",setter, "object",self.mooseObject, " ",self.mooseObject.getFieldNames('destFinfo');
if index.column() == 1:
# if field == "Color":
# flag = QtCore.Qt.ItemIsEnabled
if field == "Notes":
ann = moose.Annotator(self.mooseObject.path+'/info')
if setter in ann.getFieldNames('destFinfo'):
flag |= QtCore.Qt.ItemIsEditable
if isinstance(self.mooseObject, moose.PoolBase) or isinstance(self.mooseObject,moose.Function):
if field == 'volume' or field == 'expr':
pass
elif setter in self.mooseObject.getFieldNames('destFinfo'):
flag |= QtCore.Qt.ItemIsEditable
else:
if setter in self.mooseObject.getFieldNames('destFinfo'):
flag |= QtCore.Qt.ItemIsEditable
#if field == "Notes":
# flag |= QtCore.Qt.ItemIsEditable
# !! Replaced till here
return flag
def data(self, index, role):
ret = None
field = self.fields[index.row()]
if index.column() == 0 and role == QtCore.Qt.DisplayRole:
try:
ret = QtCore.QVariant(QtCore.QString(field)+' ('+defaults.FIELD_UNITS[field]+')')
except KeyError:
ret = QtCore.QVariant(QtCore.QString(field))
elif index.column() == 1:
if role==QtCore.Qt.CheckStateRole:
if ((str(field) == "plot Conc") or (str(field) == "plot n") ):
# print index.data(QtCore.Qt. ), str(field)
return self.checkState_
elif (role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole):
try:
if (str(field) =="Color" ):
return QtGui.QPushButton("Press Me!")
if (str(field) =="Kd" ):
#ret = self.mooseObject.getField(str(field))
Kd = 0
if self.mooseObject.className == "ZombieReac" or self.mooseObject.className == "Reac":
if self.mooseObject.numSubstrates > 1 or self.mooseObject.numProducts > 1:
if self.mooseObject.Kf != 0:
Kd = self.mooseObject.Kb/self.mooseObject.Kf
#Kd = QtCore.QVariant(QtCore.QString(str(ret)))
ret = QtCore.QVariant(QtCore.QString(str(Kd)))
if ( (str(field) != "Notes") and (str(field) != "className") and (str(field) != "Kd")):
ret = self.mooseObject.getField(str(field))
ret = QtCore.QVariant(QtCore.QString(str(ret)))
elif(str(field) == "className"):
ret = self.mooseObject.getField(str(field))
if 'Zombie' in ret:
ret = ret.split('Zombie')[1]
ret = QtCore.QVariant(QtCore.QString(str(ret)))
elif(str(field) == "Notes"):
astr = self.mooseObject.path+'/info'
mastr = moose.Annotator(astr)
ret = (mastr).getField(str('notes'))
ret = QtCore.QVariant(QtCore.QString(str(ret)))
except ValueError:
ret = None
return ret
def headerData(self, col, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return QtCore.QVariant(self.headerdata[col])
return QtCore.QVariant()
class ObjectEditView(QtGui.QTableView):
"""View class for object editor.
This class creates an instance of ObjectEditModel using the moose
element passed as its first argument.
undolen - specifies the size of the undo stack. By default set to
OBJECT_EDIT_UNDO_LENGTH constant in defaults.py. Specify something smaller if
    a large number of objects are likely to be edited.
    To enable undo/redo, connect the corresponding actions from the GUI
    to the view.model().undo and view.model().redo slots.
"""
def __init__(self, mobject, undolen=defaults.OBJECT_EDIT_UNDO_LENGTH, parent=None):
QtGui.QTableView.__init__(self, parent)
#self.setEditTriggers(self.DoubleClicked | self.SelectedClicked | self.EditKeyPressed)
vh = self.verticalHeader()
vh.setVisible(False)
hh = self.horizontalHeader()
hh.setStretchLastSection(True)
self.setAlternatingRowColors(True)
self.resizeColumnsToContents()
self.setModel(ObjectEditModel(mobject, undolen=undolen))
self.colorButton = QtGui.QPushButton()
self.colorDialog = QtGui.QColorDialog()
self.textEdit = QTextEdit()
try:
notesIndex = self.model().fields.index("Notes")
self.setIndexWidget(self.model().index(notesIndex,1), self.textEdit)
info = moose.Annotator(self.model().mooseObject.path+'/info')
self.textEdit.setText(QtCore.QString(info.getField('notes')))
self.setRowHeight(notesIndex, self.rowHeight(notesIndex) * 3)
# self.colorDialog.colorSelected.connect(
# lambda color:
#
# self.setColor(getColor(self.model().mooseObject.path+'/info')[1])
except:
pass
try:
colorIndex = self.model().fields.index("Color")
self.colorButton.clicked.connect(self.colorDialog.show)
self.colorButton.setFocusPolicy(PyQt4.QtCore.Qt.NoFocus)
self.colorDialog.colorSelected.connect(
lambda color: self.colorButton.setStyleSheet(
"QPushButton {"
+ "background-color: {0}; color: {0};".format(color.name())
+ "}"
)
)
self.setIndexWidget(self.model().index(colorIndex,1), self.colorButton)
# self.colorDialog.colorSelected.connect(
# lambda color:
#
self.setColor(getColor(self.model().mooseObject.path+'/info')[1])
except:
pass
print ('Created view with %s' %(mobject))
def setColor(self, color):
self.colorButton.setStyleSheet(
"QPushButton {"
+ "background-color: {0}; color: {0};".format(color)
+ "}"
)
self.colorDialog.setCurrentColor(color)
def dataChanged(self, tl, br):
QtGui.QTableView.dataChanged(self, tl, br)
self.viewport().update()
class ObjectEditDockWidget(QtGui.QDockWidget):
"""A dock widget whose title is set by the current moose
object. Allows switching the moose object. It stores the created
view in a dict for future use.
TODO possible performance issue: storing the views (along with
their models) ensures the undo history for each object is
retained. But without a limit on the number of views stored, it
will be wasteful on memory.
"""
objectNameChanged = QtCore.pyqtSignal('PyQt_PyObject')
colorChanged = QtCore.pyqtSignal(object, object)
def __init__(self, mobj='/', parent=None, flags=None):
QtGui.QDockWidget.__init__(self, parent=parent)
mobj = moose.element(mobj)
#self.view = view = ObjectEditView(mobj)
self.view = view = ObjectEditView(mobj)
self.view_dict = {mobj: view}
base = QWidget()
layout = QVBoxLayout()
base.setLayout(layout)
layout.addWidget(self.view)
layout.addWidget(QTextEdit())
self.setWidget(base)
self.setWindowTitle('Edit: %s' % (mobj.path))
# self.view.colorDialog.colorSelected.connect(self.colorChangedEmit)
# def clearDict(self):
# self.view_dict.clear()
def setObject(self, mobj):
element = moose.element(mobj)
try:
view = self.view_dict[element]
except KeyError:
view = ObjectEditView(element)
self.view_dict[element] = view
view.model().objectNameChanged.connect(self.emitObjectNameChanged)
view.colorDialog.colorSelected.connect(lambda color: self.colorChanged.emit(element, color))
textEdit = QTextEdit()
view.setSizePolicy( QSizePolicy.Ignored
, QSizePolicy.Ignored
)
textEdit.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
base = QSplitter()
base.setOrientation(PyQt4.QtCore.Qt.Vertical)
layout = QVBoxLayout()
layout.addWidget(view)#, 0, 0)
lineedit = QtGui.QLineEdit("Notes:")
lineedit.setReadOnly(True)
layout.addWidget(lineedit)
if ( isinstance(mobj, moose.PoolBase)
or isinstance(mobj,moose.ReacBase)
or isinstance(mobj,moose.EnzBase)
) :
info = moose.Annotator(mobj.path +'/info')
textEdit.setText(QtCore.QString(info.getField('notes')))
textEdit.textChanged.connect(lambda : info.setField('notes', str(textEdit.toPlainText())))
layout.addWidget(textEdit)#,1,0)
# self.setRowHeight(notesIndex, self.rowHeight(notesIndex) * 3)
base.setLayout(layout)
# base.setSizes( [ view.height()
# , base.height() - view.height()
# ]
# )
# print("a =>", view.height())
# print("b =>", base.height())
# layout.setStretch(0,3)
# layout.setStretch(1,1)
# layout.setContentsMargins(QMargins(0,0,0,0))
self.setWidget(base)
self.setWindowTitle('Edit: %s' % (element.path))
view.update()
def emitObjectNameChanged(self, mobj):
self.objectNameChanged.emit(mobj)
self.setWindowTitle('Edit:%s'%(mobj.path))
def main():
app = QtGui.QApplication(sys.argv)
mainwin = QtGui.QMainWindow()
c = moose.Compartment("test")
view = ObjectEditView(c, undolen=3)
mainwin.setCentralWidget(view)
action = QtGui.QAction('Undo', mainwin)
action.setShortcut('Ctrl+z')
action.triggered.connect(view.model().undo)
mainwin.menuBar().addAction(action)
action = QtGui.QAction('Redo', mainwin)
action.setShortcut('Ctrl+y')
action.triggered.connect(view.model().redo)
mainwin.menuBar().addAction(action)
mainwin.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
# objectedit.py ends here
|
BhallaLab/moose
|
moose-gui/objectedit.py
|
Python
|
gpl-3.0
| 21,805
|
[
"MOOSE"
] |
ecdfdb1eb8988809ffcc37ab7935d6d872a28cd5c58baed23e7d54a7b4986576
|
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import numpy as np
from phonopy.phonon.degeneracy import degenerate_sets
from phono3py.phonon3.conductivity import (Conductivity, all_bands_exist,
unit_to_WmK)
from phono3py.phonon3.conductivity import write_pp as _write_pp
from phono3py.phonon3.collision_matrix import CollisionMatrix
from phono3py.phonon3.triplets import get_grid_points_by_rotations
from phono3py.file_IO import (write_kappa_to_hdf5,
write_collision_to_hdf5,
read_collision_from_hdf5,
write_collision_eigenvalues_to_hdf5,
write_unitary_matrix_to_hdf5,
read_pp_from_hdf5)
from phonopy.units import THzToEv, Kb
def get_thermal_conductivity_LBTE(
interaction,
symmetry,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
grid_points=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=1e-4, # for group velocity
is_full_pp=False,
pinv_cutoff=1.0e-8,
pinv_solver=0, # default: dsyev in lapacke
write_collision=False,
read_collision=False,
write_kappa=False,
write_pp=False,
read_pp=False,
write_LBTE_solution=False,
compression="gzip",
input_filename=None,
output_filename=None,
log_level=0):
if temperatures is None:
_temperatures = [300, ]
else:
_temperatures = temperatures
if sigmas is None:
sigmas = []
if log_level:
print("-" * 19 + " Lattice thermal conducitivity (LBTE) " + "-" * 19)
print("Cutoff frequency of pseudo inversion of collision matrix: %s" %
pinv_cutoff)
if read_collision:
temps = None
else:
temps = _temperatures
lbte = Conductivity_LBTE(
interaction,
symmetry,
grid_points=grid_points,
temperatures=temps,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
solve_collective_phonon=solve_collective_phonon,
is_reducible_collision_matrix=is_reducible_collision_matrix,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
read_pp=read_pp,
pp_filename=input_filename,
pinv_cutoff=pinv_cutoff,
pinv_solver=pinv_solver,
log_level=log_level)
if read_collision:
read_from = _set_collision_from_file(
lbte,
indices=read_collision,
is_reducible_collision_matrix=is_reducible_collision_matrix,
filename=input_filename,
log_level=log_level)
if not read_from:
print("Reading collision failed.")
return False
if log_level:
temps_read = lbte.get_temperatures()
if len(temps_read) > 5:
text = (" %.1f " * 5 + "...") % tuple(temps_read[:5])
text += " %.1f" % temps_read[-1]
else:
text = (" %.1f " * len(temps_read)) % tuple(temps_read)
print("Temperature: " + text)
for i in lbte:
if write_pp:
_write_pp(lbte,
interaction,
i,
filename=output_filename,
compression=compression)
if write_collision:
_write_collision(
lbte,
interaction,
i=i,
is_reducible_collision_matrix=is_reducible_collision_matrix,
is_one_gp_colmat=(grid_points is not None),
filename=output_filename)
lbte.delete_gp_collision_and_pp()
# Write full collision matrix
if write_LBTE_solution:
if ((read_collision and
all_bands_exist(interaction) and
read_from == "grid_points" and
grid_points is None) or
(not read_collision)):
_write_collision(lbte, interaction, filename=output_filename)
if grid_points is None and all_bands_exist(interaction):
lbte.set_kappa_at_sigmas()
if write_kappa:
_write_kappa(
lbte,
interaction.get_primitive().get_volume(),
is_reducible_collision_matrix=is_reducible_collision_matrix,
write_LBTE_solution=write_LBTE_solution,
pinv_solver=pinv_solver,
compression=compression,
filename=output_filename,
log_level=log_level)
return lbte
def _write_collision(lbte,
interaction,
i=None,
is_reducible_collision_matrix=False,
is_one_gp_colmat=False,
filename=None):
grid_points = lbte.get_grid_points()
temperatures = lbte.get_temperatures()
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
gamma = lbte.get_gamma()
gamma_isotope = lbte.get_gamma_isotope()
collision_matrix = lbte.get_collision_matrix()
mesh = lbte.get_mesh_numbers()
if i is not None:
gp = grid_points[i]
if is_one_gp_colmat:
igp = 0
else:
if is_reducible_collision_matrix:
igp = gp
else:
igp = i
if all_bands_exist(interaction):
for j, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j, igp]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(
temperatures,
mesh,
gamma=gamma[j, :, igp],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j, :, igp],
grid_point=gp,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
else:
for j, sigma in enumerate(sigmas):
for k, bi in enumerate(interaction.get_band_indices()):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j, igp, k]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(
temperatures,
mesh,
gamma=gamma[j, :, igp, k],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j, :, igp, k],
grid_point=gp,
band_index=bi,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
else:
for j, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[j]
else:
gamma_isotope_at_sigma = None
write_collision_to_hdf5(temperatures,
mesh,
gamma=gamma[j],
gamma_isotope=gamma_isotope_at_sigma,
collision_matrix=collision_matrix[j],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
def _write_kappa(lbte,
volume,
is_reducible_collision_matrix=False,
write_LBTE_solution=False,
pinv_solver=None,
compression="gzip",
filename=None,
log_level=0):
temperatures = lbte.get_temperatures()
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
mesh = lbte.get_mesh_numbers()
weights = lbte.get_grid_weights()
frequencies = lbte.get_frequencies()
ave_pp = lbte.get_averaged_pp_interaction()
qpoints = lbte.get_qpoints()
kappa = lbte.get_kappa()
kappa_RTA = lbte.get_kappa_RTA()
gamma = lbte.get_gamma()
gamma_isotope = lbte.get_gamma_isotope()
gv = lbte.get_group_velocities()
f_vector = lbte.get_f_vectors()
gv_by_gv = lbte.get_gv_by_gv()
mode_cv = lbte.get_mode_heat_capacities()
mode_kappa = lbte.get_mode_kappa()
mode_kappa_RTA = lbte.get_mode_kappa_RTA()
mfp = lbte.get_mean_free_path()
coleigs = lbte.get_collision_eigenvalues()
# After kappa calculation, the variable is overwritten by unitary matrix
unitary_matrix = lbte.get_collision_matrix()
if is_reducible_collision_matrix:
frequencies = lbte.get_frequencies_all()
else:
frequencies = lbte.get_frequencies()
for i, sigma in enumerate(sigmas):
if gamma_isotope is not None:
gamma_isotope_at_sigma = gamma_isotope[i]
else:
gamma_isotope_at_sigma = None
write_kappa_to_hdf5(temperatures,
mesh,
frequency=frequencies,
group_velocity=gv,
gv_by_gv=gv_by_gv,
mean_free_path=mfp[i],
heat_capacity=mode_cv,
kappa=kappa[i],
mode_kappa=mode_kappa[i],
kappa_RTA=kappa_RTA[i],
mode_kappa_RTA=mode_kappa_RTA[i],
f_vector=f_vector,
gamma=gamma[i],
gamma_isotope=gamma_isotope_at_sigma,
averaged_pp_interaction=ave_pp,
qpoint=qpoints,
weight=weights,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
kappa_unit_conversion=unit_to_WmK / volume,
compression=compression,
filename=filename,
verbose=log_level)
if coleigs is not None:
write_collision_eigenvalues_to_hdf5(temperatures,
mesh,
coleigs[i],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=log_level)
if write_LBTE_solution:
if pinv_solver is not None:
solver = _select_solver(pinv_solver)
if solver in [1, 2, 3, 4, 5]:
write_unitary_matrix_to_hdf5(
temperatures,
mesh,
unitary_matrix=unitary_matrix,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
solver=solver,
filename=filename,
verbose=log_level)
def _set_collision_from_file(lbte,
indices='all',
is_reducible_collision_matrix=False,
filename=None,
log_level=0):
sigmas = lbte.get_sigmas()
sigma_cutoff = lbte.get_sigma_cutoff_width()
mesh = lbte.get_mesh_numbers()
grid_points = lbte.get_grid_points()
indices = indices
if len(sigmas) > 1:
gamma = []
collision_matrix = []
read_from = None
if log_level:
print("---------------------- Reading collision data from file "
"----------------------")
sys.stdout.flush()
for j, sigma in enumerate(sigmas):
collisions = read_collision_from_hdf5(mesh,
indices=indices,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if collisions:
(colmat_at_sigma,
gamma_at_sigma,
temperatures) = collisions
if len(sigmas) == 1:
collision_matrix = colmat_at_sigma
gamma = np.zeros((1,) + gamma_at_sigma.shape,
dtype='double', order='C')
gamma[0] = gamma_at_sigma
else:
collision_matrix.append(colmat_at_sigma)
gamma.append(gamma_at_sigma)
read_from = "full_matrix"
else:
vals = _allocate_collision(True,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename)
if vals:
colmat_at_sigma, gamma_at_sigma, temperatures = vals
else:
if log_level:
print("Collision at grid point %d doesn't exist." %
grid_points[0])
vals = _allocate_collision(False,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename)
if vals:
colmat_at_sigma, gamma_at_sigma, temperatures = vals
else:
if log_level:
print("Collision at (grid point %d, band index %d) "
"doesn't exist." % (grid_points[0], 1))
return False
for i, gp in enumerate(grid_points):
if not _collect_collision_gp(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
indices,
is_reducible_collision_matrix,
filename,
log_level):
num_band = colmat_at_sigma.shape[3]
for j in range(num_band):
if not _collect_collision_band(
colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
j,
indices,
is_reducible_collision_matrix,
filename,
log_level):
return False
if len(sigmas) == 1:
gamma = gamma_at_sigma
collision_matrix = colmat_at_sigma
else:
gamma.append(gamma_at_sigma[0])
collision_matrix.append(colmat_at_sigma[0])
read_from = "grid_points"
if len(sigmas) > 1:
temperatures = np.array(temperatures, dtype='double', order='C')
gamma = np.array(gamma, dtype='double', order='C')
collision_matrix = np.array(collision_matrix,
dtype='double', order='C')
lbte.set_gamma(gamma)
lbte.set_collision_matrix(collision_matrix)
# lbte.set_temperatures invokes allocation of arrays. So this must
# be called after setting collision_matrix for saving memory
# space.
lbte.set_temperatures(temperatures)
return read_from
def _allocate_collision(for_gps,
mesh,
sigma,
sigma_cutoff,
grid_points,
indices,
is_reducible_collision_matrix,
filename):
num_mesh_points = np.prod(mesh)
if for_gps:
collision = read_collision_from_hdf5(mesh,
indices=indices,
grid_point=grid_points[0],
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=False)
else:
collision = read_collision_from_hdf5(mesh,
indices=indices,
grid_point=grid_points[0],
band_index=0,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=False)
if collision is None:
return False
num_temp = len(collision[2]) # This is to treat indices="all".
if is_reducible_collision_matrix:
if for_gps:
num_band = collision[0].shape[4] # for gps (s,T,b,irgp,b)
else:
num_band = collision[0].shape[3] # for bands (s,T,irgp,b)
gamma_at_sigma = np.zeros(
(1, num_temp, num_mesh_points, num_band),
dtype='double', order='C')
colmat_at_sigma = np.zeros(
(1, num_temp,
num_mesh_points, num_band,
num_mesh_points, num_band),
dtype='double', order='C')
else:
if for_gps:
num_band = collision[0].shape[5] # for gps (s,T,b0,3,irgp,b,3)
else:
num_band = collision[0].shape[4] # for bands (s,T,3,irgp,b,3)
gamma_at_sigma = np.zeros(
(1, num_temp, len(grid_points), num_band),
dtype='double', order='C')
colmat_at_sigma = np.zeros(
(1, num_temp,
len(grid_points), num_band, 3,
len(grid_points), num_band, 3),
dtype='double', order='C')
temperatures = np.zeros(num_temp, dtype='double', order='C')
return colmat_at_sigma, gamma_at_sigma, temperatures
def _collect_collision_gp(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
indices,
is_reducible_collision_matrix,
filename,
log_level):
collision_gp = read_collision_from_hdf5(
mesh,
indices=indices,
grid_point=gp,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if not collision_gp:
return False
(colmat_at_gp,
gamma_at_gp,
temperatures_at_gp) = collision_gp
if is_reducible_collision_matrix:
igp = gp
else:
igp = i
gamma_at_sigma[0, :, igp] = gamma_at_gp
colmat_at_sigma[0, :, igp] = colmat_at_gp[0]
temperatures[:] = temperatures_at_gp
return True
def _collect_collision_band(colmat_at_sigma,
gamma_at_sigma,
temperatures,
mesh,
sigma,
sigma_cutoff,
i,
gp,
j,
indices,
is_reducible_collision_matrix,
filename,
log_level):
collision_band = read_collision_from_hdf5(
mesh,
indices=indices,
grid_point=gp,
band_index=j,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename,
verbose=(log_level > 0))
if log_level:
sys.stdout.flush()
if collision_band is False:
return False
(colmat_at_band,
gamma_at_band,
temperatures_at_band) = collision_band
if is_reducible_collision_matrix:
igp = gp
else:
igp = i
gamma_at_sigma[0, :, igp, j] = gamma_at_band
colmat_at_sigma[0, :, igp, j] = colmat_at_band[0]
temperatures[:] = temperatures_at_band
return True
def _select_solver(pinv_solver):
try:
import phono3py._phono3py as phono3c
default_solver = phono3c.default_colmat_solver()
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
default_solver = 4
solver_numbers = (1, 2, 3, 4, 5, 6)
solver = pinv_solver
if solver == 0: # default solver
if default_solver in (4, 5, 6):
try:
import scipy.linalg
except ImportError:
solver = 1
else:
solver = default_solver
else:
solver = default_solver
elif solver not in solver_numbers:
solver = default_solver
return solver
def diagonalize_collision_matrix(collision_matrices,
i_sigma=None,
i_temp=None,
pinv_solver=0,
log_level=0):
"""Diagonalize collision matrices.
Note
----
    collision_matrices is overwritten by eigenvectors.
Parameters
----------
    collision_matrices : ndarray
Collision matrix. This ndarray has to have the following size and
flags.
shapes:
(sigmas, temperatures, prod(mesh), num_band, prod(mesh), num_band)
(sigmas, temperatures, ir_grid_points, num_band, 3,
ir_grid_points, num_band, 3)
(size, size)
dtype='double', order='C'
i_sigma : int, optional
Index of BZ integration methods, tetrahedron method and smearing
method with widths. Default is None.
i_temp : int, optional
Index of temperature. Default is None.
pinv_solver : int, optional
        Diagonalization solver choice.
log_level : int, optional
Verbosity level. Smaller is more quiet. Default is 0.
Returns
-------
w : ndarray, optional
Eigenvalues.
shape=(size_of_collision_matrix,), dtype='double'
"""
start = time.time()
# Matrix size of collision matrix to be diagonalized.
# The following value is expected:
# ir-colmat: num_ir_grid_points * num_band * 3
# red-colmat: num_mesh_points * num_band
shape = collision_matrices.shape
if len(shape) == 6:
size = shape[2] * shape[3]
assert size == shape[4] * shape[5]
elif len(shape) == 8:
size = np.prod(shape[2:5])
assert size == np.prod(shape[5:8])
elif len(shape) == 2:
size = shape[0]
assert size == shape[1]
solver = _select_solver(pinv_solver)
# [1] dsyev: safer and slower than dsyevd and smallest memory usage
# [2] dsyevd: faster than dsyev and largest memory usage
if solver in [1, 2]:
if log_level:
routine = ['dsyev', 'dsyevd'][solver - 1]
sys.stdout.write("Diagonalizing by lapacke %s... " % routine)
sys.stdout.flush()
import phono3py._phono3py as phono3c
w = np.zeros(size, dtype='double')
if i_sigma is None:
_i_sigma = 0
else:
_i_sigma = i_sigma
if i_temp is None:
_i_temp = 0
else:
_i_temp = i_temp
phono3c.diagonalize_collision_matrix(collision_matrices,
w,
_i_sigma,
_i_temp,
0.0,
(solver + 1) % 2,
0) # only diagonalization
elif solver == 3: # np.linalg.eigh depends on dsyevd.
if log_level:
sys.stdout.write("Diagonalizing by np.linalg.eigh... ")
sys.stdout.flush()
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, col_mat[:] = np.linalg.eigh(col_mat)
elif solver == 4: # fully scipy dsyev
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyev... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyev(col_mat.T, overwrite_a=1)
elif solver == 5: # fully scipy dsyevd
if log_level:
sys.stdout.write("Diagonalizing by "
"scipy.linalg.lapack.dsyevd... ")
sys.stdout.flush()
import scipy.linalg
col_mat = collision_matrices[i_sigma, i_temp].reshape(
size, size)
w, _, info = scipy.linalg.lapack.dsyevd(col_mat.T, overwrite_a=1)
if log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return w
class Conductivity_LBTE(Conductivity):
def __init__(self,
interaction,
symmetry,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
                 gv_delta_q=None, # finite difference for group velocity
is_full_pp=False,
read_pp=False,
pp_filename=None,
pinv_cutoff=1.0e-8,
pinv_solver=0,
log_level=0):
self._pp = None
self._temperatures = None
self._sigmas = None
self._sigma_cutoff = None
self._is_kappa_star = None
self._gv_delta_q = None
self._is_full_pp = None
self._log_level = None
self._primitive = None
self._dm = None
self._frequency_factor_to_THz = None
self._cutoff_frequency = None
self._boundary_mfp = None
self._symmetry = None
self._point_operations = None
self._rotations_cartesian = None
self._grid_points = None
self._grid_weights = None
self._grid_address = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._kappa = None
self._mode_kappa = None
self._kappa_RTA = None
self._mode_kappa_RTA = None
self._read_gamma = False
self._read_gamma_iso = False
self._frequencies = None
self._cv = None
self._gv = None
self._f_vectors = None
self._gv_sum2 = None
self._mfp = None
self._gamma = None
self._gamma_iso = None
self._averaged_pp_interaction = None
self._mesh = None
self._conversion_factor = None
self._is_isotope = None
self._isotope = None
self._mass_variances = None
self._grid_point_count = None
self._collision_eigenvalues = None
Conductivity.__init__(self,
interaction,
symmetry,
grid_points=grid_points,
temperatures=temperatures,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
log_level=log_level)
self._is_reducible_collision_matrix = is_reducible_collision_matrix
self._solve_collective_phonon = solve_collective_phonon
if not self._is_kappa_star:
self._is_reducible_collision_matrix = True
self._collision_matrix = None
self._read_pp = read_pp
self._pp_filename = pp_filename
self._pinv_cutoff = pinv_cutoff
self._pinv_solver = pinv_solver
if grid_points is None:
self._all_grid_points = True
else:
self._all_grid_points = False
if self._temperatures is not None:
self._allocate_values()
def set_kappa_at_sigmas(self):
if len(self._grid_points) != len(self._ir_grid_points):
print("Collision matrix is not well created.")
import sys
sys.exit(1)
else:
self._set_kappa_at_sigmas()
def set_collision_matrix(self, collision_matrix):
self._collision_matrix = collision_matrix
def get_f_vectors(self):
return self._f_vectors
@property
def collision_matrix(self):
return self._collision_matrix
def get_collision_matrix(self):
return self.collision_matrix
def get_collision_eigenvalues(self):
return self._collision_eigenvalues
def get_mean_free_path(self):
return self._mfp
def get_frequencies_all(self):
return self._frequencies[:np.prod(self._mesh)]
def get_kappa_RTA(self):
return self._kappa_RTA
def get_mode_kappa_RTA(self):
return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
self._collision.delete_integration_weights()
self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
i = self._grid_point_count
self._show_log_header(i)
gp = self._grid_points[i]
if not self._all_grid_points:
self._collision_matrix[:] = 0
if not self._read_gamma:
self._collision.set_grid_point(gp)
if self._log_level:
print("Number of triplets: %d" %
len(self._pp.get_triplets_at_q()[0]))
self._set_collision_matrix_at_sigmas(i)
if self._is_reducible_collision_matrix:
igp = gp
else:
igp = i
self._set_harmonic_properties(i, igp)
if self._isotope is not None:
gamma_iso = self._get_gamma_isotope_at_sigmas(i)
band_indices = self._pp.get_band_indices()
self._gamma_iso[:, igp, :] = gamma_iso[:, band_indices]
if self._log_level:
self._show_log(i)
def _allocate_values(self):
num_band0 = len(self._pp.band_indices)
num_band = len(self._primitive) * 3
num_ir_grid_points = len(self._ir_grid_points)
num_temp = len(self._temperatures)
num_mesh_points = np.prod(self._mesh)
if self._is_reducible_collision_matrix:
num_grid_points = num_mesh_points
else:
num_grid_points = len(self._grid_points)
if self._all_grid_points:
num_stored_grid_points = num_grid_points
else:
num_stored_grid_points = 1
self._kappa = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._kappa_RTA = np.zeros((len(self._sigmas), num_temp, 6),
dtype='double', order='C')
self._gv = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._f_vectors = np.zeros((num_grid_points, num_band0, 3),
dtype='double', order='C')
self._gv_sum2 = np.zeros((num_grid_points, num_band0, 6),
dtype='double', order='C')
self._mfp = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
3), dtype='double', order='C')
self._cv = np.zeros((num_temp, num_grid_points, num_band0),
dtype='double', order='C')
if self._is_full_pp:
self._averaged_pp_interaction = np.zeros(
(num_grid_points, num_band0), dtype='double', order='C')
if self._gamma is None:
self._gamma = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0), dtype='double', order='C')
if self._isotope is not None:
self._gamma_iso = np.zeros((len(self._sigmas),
num_grid_points,
num_band0), dtype='double', order='C')
if self._is_reducible_collision_matrix:
self._mode_kappa = np.zeros((len(self._sigmas),
num_temp,
num_mesh_points,
num_band,
6), dtype='double', order='C')
self._mode_kappa_RTA = np.zeros((len(self._sigmas),
num_temp,
num_mesh_points,
num_band,
6), dtype='double', order='C')
self._collision = CollisionMatrix(
self._pp,
is_reducible_collision_matrix=True,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas), num_temp,
num_stored_grid_points, num_band0,
num_mesh_points, num_band),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas), num_temp, num_mesh_points * num_band),
dtype='double', order='C')
else:
self._mode_kappa = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
self._mode_kappa_RTA = np.zeros((len(self._sigmas),
num_temp,
num_grid_points,
num_band0,
6), dtype='double')
self._rot_grid_points = np.zeros(
(len(self._ir_grid_points), len(self._point_operations)),
dtype='uintp')
for i, ir_gp in enumerate(self._ir_grid_points):
self._rot_grid_points[i] = get_grid_points_by_rotations(
self._grid_address[ir_gp],
self._point_operations,
self._mesh)
self._collision = CollisionMatrix(
self._pp,
point_operations=self._point_operations,
ir_grid_points=self._ir_grid_points,
rot_grid_points=self._rot_grid_points,
log_level=self._log_level)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(len(self._sigmas),
num_temp,
num_stored_grid_points, num_band0, 3,
num_ir_grid_points, num_band, 3),
dtype='double', order='C')
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas),
num_temp,
num_ir_grid_points * num_band * 3),
dtype='double', order='C')
def _set_collision_matrix_at_sigmas(self, i):
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating collision matrix with "
if sigma is None:
text += "tetrahedron method."
else:
text += "sigma=%s" % sigma
if self._sigma_cutoff is None:
text += "."
else:
text += "(%4.2f SD)." % self._sigma_cutoff
print(text)
self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
self._collision.set_integration_weights()
if self._read_pp:
pp, _g_zero = read_pp_from_hdf5(
self._mesh,
grid_point=self._grid_points[i],
sigma=sigma,
sigma_cutoff=self._sigma_cutoff,
filename=self._pp_filename,
verbose=(self._log_level > 0))
_, g_zero = self._collision.get_integration_weights()
if self._log_level:
if len(self._sigmas) > 1:
print("Multiple sigmas or mixing smearing and "
"tetrahedron method is not supported.")
if _g_zero is not None and (_g_zero != g_zero).any():
raise ValueError("Inconsistency found in g_zero.")
self._collision.set_interaction_strength(pp)
elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
if self._log_level:
print("Existing ph-ph interaction is used.")
else:
if self._log_level:
print("Calculating ph-ph interaction...")
self._collision.run_interaction(is_full_pp=self._is_full_pp)
if self._is_full_pp and j == 0:
self._averaged_pp_interaction[i] = (
self._pp.get_averaged_interaction())
for k, t in enumerate(self._temperatures):
self._collision.set_temperature(t)
self._collision.run()
if self._all_grid_points:
if self._is_reducible_collision_matrix:
i_data = self._grid_points[i]
else:
i_data = i
else:
i_data = 0
self._gamma[j, k, i_data] = (
self._collision.get_imag_self_energy())
self._collision_matrix[j, k, i_data] = (
self._collision.get_collision_matrix())
def _set_kappa_at_sigmas(self):
if self._is_reducible_collision_matrix:
if self._is_kappa_star:
self._average_collision_matrix_by_degeneracy()
self._expand_collisions()
self._combine_reducible_collisions()
weights = np.ones(np.prod(self._mesh), dtype='intc')
self._symmetrize_collision_matrix()
else:
self._combine_collisions()
weights = self._get_weights()
for i, w_i in enumerate(weights):
for j, w_j in enumerate(weights):
self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
self._average_collision_matrix_by_degeneracy()
self._symmetrize_collision_matrix()
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "----------- Thermal conductivity (W/m-k) "
if sigma:
text += "for sigma=%s -----------" % sigma
else:
text += "with tetrahedron method -----------"
print(text)
sys.stdout.flush()
for k, t in enumerate(self._temperatures):
if t > 0:
self._set_kappa_RTA(j, k, weights)
w = diagonalize_collision_matrix(
self._collision_matrix,
i_sigma=j, i_temp=k,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
self._collision_eigenvalues[j, k] = w
self._set_kappa(j, k, weights)
if self._log_level:
print(("#%6s " + " %-10s" * 6) %
("T(K)", "xx", "yy", "zz", "yz", "xz", "xy"))
print(("%7.1f " + " %10.3f" * 6) %
((t,) + tuple(self._kappa[j, k])))
print((" %6s " + " %10.3f" * 6) %
(("(RTA)",) + tuple(self._kappa_RTA[j, k])))
print("-" * 76)
sys.stdout.flush()
sys.stdout.flush()
if self._log_level:
print('')
def _combine_collisions(self):
num_band = self._primitive.get_number_of_atoms() * 3
for j, k in list(np.ndindex((len(self._sigmas),
len(self._temperatures)))):
for i, ir_gp in enumerate(self._ir_grid_points):
for r, r_gp in zip(self._rotations_cartesian,
self._rot_grid_points[i]):
if ir_gp != r_gp:
continue
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, :, i, l, :] += main_diagonal[l] * r
def _combine_reducible_collisions(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_mesh_points = np.prod(self._mesh)
for j, k in list(
np.ndindex((len(self._sigmas), len(self._temperatures)))):
for i in range(num_mesh_points):
main_diagonal = self._get_main_diagonal(i, j, k)
for l in range(num_band):
self._collision_matrix[
j, k, i, l, i, l] += main_diagonal[l]
def _expand_collisions(self):
start = time.time()
if self._log_level:
sys.stdout.write("- Expanding properties to all grid points ")
sys.stdout.flush()
num_mesh_points = np.prod(self._mesh)
num_rot = len(self._point_operations)
rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype='uintp')
for i in range(num_mesh_points):
rot_grid_points[:, i] = get_grid_points_by_rotations(
self._grid_address[i],
self._point_operations,
self._mesh)
try:
import phono3py._phono3py as phono3c
phono3c.expand_collision_matrix(self._collision_matrix,
self._ir_grid_points,
rot_grid_points)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for i, ir_gp in enumerate(self._ir_grid_points):
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
colmat_irgp = self._collision_matrix[:, :, ir_gp, :, :, :].copy()
colmat_irgp /= multi
self._collision_matrix[:, :, ir_gp, :, :, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
for k in range(num_mesh_points):
gp_c = rot_grid_points[j, k]
self._collision_matrix[:, :, gp_r, :, gp_c, :] += (
colmat_irgp[:, :, :, k, :])
for i, ir_gp in enumerate(self._ir_grid_points):
gv_irgp = self._gv[ir_gp].copy()
self._gv[ir_gp] = 0
cv_irgp = self._cv[:, ir_gp, :].copy()
self._cv[:, ir_gp, :] = 0
gamma_irgp = self._gamma[:, :, ir_gp, :].copy()
self._gamma[:, :, ir_gp, :] = 0
multi = (rot_grid_points[:, ir_gp] == ir_gp).sum()
if self._gamma_iso is not None:
gamma_iso_irgp = self._gamma_iso[:, ir_gp, :].copy()
self._gamma_iso[:, ir_gp, :] = 0
for j, r in enumerate(self._rotations_cartesian):
gp_r = rot_grid_points[j, ir_gp]
self._gamma[:, :, gp_r, :] += gamma_irgp / multi
if self._gamma_iso is not None:
self._gamma_iso[:, gp_r, :] += gamma_iso_irgp / multi
self._gv[gp_r] += np.dot(gv_irgp, r.T) / multi
self._cv[:, gp_r, :] += cv_irgp / multi
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _get_weights(self):
"""Returns weights used for collision matrix and |X> and |f>
self._rot_grid_points : ndarray
shape=(ir_grid_points, point_operations), dtype='uintp'
r_gps : grid points of arms of k-star with duplicates
len(r_gps) == order of crystallographic point group
len(unique(r_gps)) == number of arms of the k-star
Returns
-------
weights : list
            sqrt(g_k / |g|), where |g| is the order of the crystallographic
            point group and g_k is the number of arms of the k-star.
"""
weights = []
n = float(self._rot_grid_points.shape[1])
for r_gps in self._rot_grid_points:
weights.append(np.sqrt(len(np.unique(r_gps)) / n))
return weights
def _symmetrize_collision_matrix(self):
start = time.time()
try:
import phono3py._phono3py as phono3c
if self._log_level:
sys.stdout.write("- Making collision matrix symmetric "
"(built-in) ")
sys.stdout.flush()
phono3c.symmetrize_collision_matrix(self._collision_matrix)
except ImportError:
if self._log_level:
sys.stdout.write("- Making collision matrix symmetric "
"(numpy) ")
sys.stdout.flush()
if self._is_reducible_collision_matrix:
size = np.prod(self._collision_matrix.shape[2:4])
else:
size = np.prod(self._collision_matrix.shape[2:5])
for i in range(self._collision_matrix.shape[0]):
for j in range(self._collision_matrix.shape[1]):
col_mat = self._collision_matrix[i, j].reshape(size, size)
col_mat += col_mat.T
col_mat /= 2
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _average_collision_matrix_by_degeneracy(self):
start = time.time()
# Average matrix elements belonging to degenerate bands
if self._log_level:
sys.stdout.write("- Averaging collision matrix elements "
"by phonon degeneracy ")
sys.stdout.flush()
col_mat = self._collision_matrix
for i, gp in enumerate(self._ir_grid_points):
freqs = self._frequencies[gp]
deg_sets = degenerate_sets(freqs)
for dset in deg_sets:
bi_set = []
for j in range(len(freqs)):
if j in dset:
bi_set.append(j)
if self._is_reducible_collision_matrix:
sum_col = (col_mat[:, :, gp, bi_set, :, :].sum(axis=2) /
len(bi_set))
for j in bi_set:
col_mat[:, :, gp, j, :, :] = sum_col
else:
sum_col = (
col_mat[:, :, i, bi_set, :, :, :, :].sum(axis=2) /
len(bi_set))
for j in bi_set:
col_mat[:, :, i, j, :, :, :, :] = sum_col
for i, gp in enumerate(self._ir_grid_points):
freqs = self._frequencies[gp]
deg_sets = degenerate_sets(freqs)
for dset in deg_sets:
bi_set = []
for j in range(len(freqs)):
if j in dset:
bi_set.append(j)
if self._is_reducible_collision_matrix:
sum_col = (col_mat[:, :, :, :, gp, bi_set].sum(axis=4) /
len(bi_set))
for j in bi_set:
col_mat[:, :, :, :, gp, j] = sum_col
else:
sum_col = (
col_mat[:, :, :, :, :, i, bi_set, :].sum(axis=5) /
len(bi_set))
for j in bi_set:
col_mat[:, :, :, :, :, i, j, :] = sum_col
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
def _get_X(self, i_temp, weights, gv):
num_band = self._primitive.get_number_of_atoms() * 3
X = gv.copy()
if self._is_reducible_collision_matrix:
num_mesh_points = np.prod(self._mesh)
freqs = self._frequencies[:num_mesh_points]
else:
freqs = self._frequencies[self._ir_grid_points]
t = self._temperatures[i_temp]
sinh = np.where(freqs > self._cutoff_frequency,
np.sinh(freqs * THzToEv / (2 * Kb * t)),
-1.0)
inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
freqs_sinh = freqs * THzToEv * inv_sinh / (4 * Kb * t ** 2)
for i, f in enumerate(freqs_sinh):
X[i] *= weights[i]
for j in range(num_band):
X[i, j] *= f[j]
if t > 0:
return X.reshape(-1, 3)
else:
return np.zeros_like(X.reshape(-1, 3))
def _get_Y(self, i_sigma, i_temp, weights, X):
solver = _select_solver(self._pinv_solver)
num_band = self._primitive.get_number_of_atoms() * 3
if self._is_reducible_collision_matrix:
num_grid_points = np.prod(self._mesh)
size = num_grid_points * num_band
else:
num_grid_points = len(self._ir_grid_points)
size = num_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
# Transpose eigvecs because colmat was solved by column major order
if solver in [1, 2, 4, 5]:
v = v.T
start = time.time()
if solver in [0, 1, 2, 3, 4, 5]:
if self._log_level:
sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
"(np.dot) " % self._pinv_cutoff)
sys.stdout.flush()
e = self._get_eigvals_pinv(i_sigma, i_temp)
if self._is_reducible_collision_matrix:
X1 = np.dot(v.T, X)
for i in range(3):
X1[:, i] *= e
Y = np.dot(v, X1)
else:
Y = np.dot(v, e * np.dot(v.T, X.ravel())).reshape(-1, 3)
else: # solver=6 This is slower as far as tested.
import phono3py._phono3py as phono3c
if self._log_level:
sys.stdout.write("Calculating pseudo-inv with cutoff=%-.1e "
"(built-in) " % self._pinv_cutoff)
sys.stdout.flush()
w = self._collision_eigenvalues[i_sigma, i_temp]
phono3c.pinv_from_eigensolution(self._collision_matrix,
w,
i_sigma,
i_temp,
self._pinv_cutoff,
0)
if self._is_reducible_collision_matrix:
Y = np.dot(v, X)
else:
Y = np.dot(v, X.ravel()).reshape(-1, 3)
self._set_f_vectors(Y, num_grid_points, weights)
if self._log_level:
print("[%.3fs]" % (time.time() - start))
sys.stdout.flush()
return Y
def _set_f_vectors(self, Y, num_grid_points, weights):
# Collision matrix is half of that defined in Chaput's paper.
# Therefore Y is divided by 2.
num_band = self._primitive.get_number_of_atoms() * 3
self._f_vectors[:] = ((Y / 2).reshape(num_grid_points, num_band * 3).T
/ weights).T.reshape(self._f_vectors.shape)
def _get_eigvals_pinv(self, i_sigma, i_temp):
w = self._collision_eigenvalues[i_sigma, i_temp]
e = np.zeros_like(w)
for l, val in enumerate(w):
if abs(val) > self._pinv_cutoff:
e[l] = 1 / val
return e
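# Illustration (not part of the original code): the loop above inverts only
# those eigenvalues whose magnitude exceeds the pinv cutoff, which is what
# makes this a pseudo-inverse. A vectorized NumPy sketch of the same idea
# (guarding against division by the discarded values) could be:
#
#     mask = np.abs(w) > self._pinv_cutoff
#     e = np.where(mask, 1.0 / np.where(mask, w, 1.0), 0.0)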
def _get_I(self, a, b, size, plus_transpose=True):
"""Return I matrix in Chaput's PRL paper.
None is returned if I is a zero matrix.
"""
r_sum = np.zeros((3, 3), dtype='double', order='C')
for r in self._rotations_cartesian:
for i in range(3):
for j in range(3):
r_sum[i, j] += r[a, i] * r[b, j]
if plus_transpose:
r_sum += r_sum.T
# Return None to avoid spending computation on diagonalization
if (np.abs(r_sum) < 1e-10).all():
return None
# Same as np.kron(np.eye(size), r_sum), but written as below
# to be sure the values in memory are C-contiguous with 'double'.
I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
for i in range(size):
I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum
return I_mat
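# Illustration (not part of the original code): as the comment above notes,
# the block fill is equivalent to a Kronecker product with the identity:
#
#     I_kron = np.kron(np.eye(size), r_sum)
#     assert np.allclose(I_kron, I_mat)
#
# The explicit loop is kept only to guarantee a C-contiguous 'double' array.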
def _set_kappa(self, i_sigma, i_temp, weights):
N = self._num_sampling_grid_points
if self._is_reducible_collision_matrix:
X = self._get_X(i_temp, weights, self._gv)
num_mesh_points = np.prod(self._mesh)
Y = self._get_Y(i_sigma, i_temp, weights, X)
self._set_mean_free_path(i_sigma, i_temp, weights, Y)
# Passing self._rotations_cartesian here symmetrizes kappa.
# None can be passed instead to inspect the unsymmetrized values.
self._set_mode_kappa(self._mode_kappa,
X,
Y,
num_mesh_points,
self._rotations_cartesian,
i_sigma,
i_temp)
self._mode_kappa[i_sigma, i_temp] /= len(self._rotations_cartesian)
self._kappa[i_sigma, i_temp] = (
self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
else:
if self._solve_collective_phonon:
self._set_mode_kappa_Chaput(i_sigma, i_temp, weights)
else:
X = self._get_X(i_temp, weights, self._gv)
num_ir_grid_points = len(self._ir_grid_points)
Y = self._get_Y(i_sigma, i_temp, weights, X)
self._set_mean_free_path(i_sigma, i_temp, weights, Y)
self._set_mode_kappa(self._mode_kappa,
X,
Y,
num_ir_grid_points,
self._rotations_cartesian,
i_sigma,
i_temp)
# self._set_mode_kappa_from_mfp(weights,
# num_ir_grid_points,
# self._rotations_cartesian,
# i_sigma,
# i_temp)
self._kappa[i_sigma, i_temp] = (
self._mode_kappa[i_sigma, i_temp].sum(axis=0).sum(axis=0) / N)
def _set_kappa_RTA(self, i_sigma, i_temp, weights):
N = self._num_sampling_grid_points
num_band = self._primitive.get_number_of_atoms() * 3
X = self._get_X(i_temp, weights, self._gv)
Y = np.zeros_like(X)
if self._is_reducible_collision_matrix:
# This RTA is not equivalent to conductivity_RTA.
# The lifetime is defined from the diagonal part of
# collision matrix.
num_mesh_points = np.prod(self._mesh)
size = num_mesh_points * num_band
v_diag = np.diagonal(
self._collision_matrix[i_sigma, i_temp].reshape(size, size))
for gp in range(num_mesh_points):
frequencies = self._frequencies[gp]
for j, f in enumerate(frequencies):
if f > self._cutoff_frequency:
i_mode = gp * num_band + j
Y[i_mode, :] = X[i_mode, :] / v_diag[i_mode]
# Passing self._rotations_cartesian here symmetrizes kappa.
# None can be passed instead to inspect the unsymmetrized values.
self._set_mode_kappa(self._mode_kappa_RTA,
X,
Y,
num_mesh_points,
self._rotations_cartesian,
i_sigma,
i_temp)
g = len(self._rotations_cartesian)
self._mode_kappa_RTA[i_sigma, i_temp] /= g
self._kappa_RTA[i_sigma, i_temp] = (
self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
N)
else:
# This RTA is supposed to be the same as conductivity_RTA.
num_ir_grid_points = len(self._ir_grid_points)
size = num_ir_grid_points * num_band * 3
for i, gp in enumerate(self._ir_grid_points):
g = self._get_main_diagonal(i, i_sigma, i_temp)
frequencies = self._frequencies[gp]
for j, f in enumerate(frequencies):
if f > self._cutoff_frequency:
i_mode = i * num_band + j
old_settings = np.seterr(all='raise')
try:
Y[i_mode, :] = X[i_mode, :] / g[j]
except:
print("=" * 26 + " Warning " + "=" * 26)
print(" Unexpected physical condition of ph-ph "
"interaction calculation was found.")
print(" g[j]=%f at gp=%d, band=%d, freq=%f" %
(g[j], gp, j + 1, f))
print("=" * 61)
np.seterr(**old_settings)
self._set_mode_kappa(self._mode_kappa_RTA,
X,
Y,
num_ir_grid_points,
self._rotations_cartesian,
i_sigma,
i_temp)
self._kappa_RTA[i_sigma, i_temp] = (
self._mode_kappa_RTA[i_sigma, i_temp].sum(axis=0).sum(axis=0) /
N)
def _set_mode_kappa(self,
mode_kappa,
X,
Y,
num_grid_points,
rotations_cartesian,
i_sigma,
i_temp):
num_band = self._primitive.get_number_of_atoms() * 3
for i, (v_gp, f_gp) in enumerate(zip(X.reshape(num_grid_points,
num_band, 3),
Y.reshape(num_grid_points,
num_band, 3))):
for j, (v, f) in enumerate(zip(v_gp, f_gp)):
# Do not consider three lowest modes at Gamma-point
# It is assumed that there are no imaginary modes.
if (self._grid_address[i] == 0).all() and j < 3:
continue
if rotations_cartesian is None:
sum_k = np.outer(v, f)
else:
sum_k = np.zeros((3, 3), dtype='double')
for r in rotations_cartesian:
sum_k += np.outer(np.dot(r, v), np.dot(r, f))
sum_k = sum_k + sum_k.T
for k, vxf in enumerate(
((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
t = self._temperatures[i_temp]
# Collision matrix is half of that defined in Chaput's paper.
# Therefore the factor 2 does not need to be multiplied here.
# sum_k = sum_k + sum_k.T is equivalent to I(a,b) + I(b,a).
mode_kappa[i_sigma, i_temp] *= self._conversion_factor * Kb * t ** 2
def _set_mode_kappa_Chaput(self, i_sigma, i_temp, weights):
"""Calculate mode kappa by the way in Laurent Chaput's PRL paper.
This gives the different result from _set_mode_kappa and requires more
memory space.
"""
X = self._get_X(i_temp, weights, self._gv).ravel()
num_ir_grid_points = len(self._ir_grid_points)
num_band = self._primitive.get_number_of_atoms() * 3
size = num_ir_grid_points * num_band * 3
v = self._collision_matrix[i_sigma, i_temp].reshape(size, size)
solver = _select_solver(self._pinv_solver)
if solver in [1, 2, 4, 5]:
v = v.T
e = self._get_eigvals_pinv(i_sigma, i_temp)
t = self._temperatures[i_temp]
omega_inv = np.empty(v.shape, dtype='double', order='C')
np.dot(v, (e * v).T, out=omega_inv)
Y = np.dot(omega_inv, X)
self._set_f_vectors(Y, num_ir_grid_points, weights)
elems = ((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))
for i, vxf in enumerate(elems):
mat = self._get_I(vxf[0], vxf[1], num_ir_grid_points * num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] = 0
if mat is not None:
np.dot(mat, omega_inv, out=mat)
# vals = (X ** 2 * np.diag(mat)).reshape(-1, 3).sum(axis=1)
# vals = vals.reshape(num_ir_grid_points, num_band)
# self._mode_kappa[i_sigma, i_temp, :, :, i] = vals
w = diagonalize_collision_matrix(mat,
pinv_solver=self._pinv_solver,
log_level=self._log_level)
if solver in [1, 2, 4, 5]:
mat = mat.T
spectra = np.dot(mat.T, X) ** 2 * w
for s, eigvec in zip(spectra, mat.T):
vals = s * (eigvec ** 2).reshape(-1, 3).sum(axis=1)
vals = vals.reshape(num_ir_grid_points, num_band)
self._mode_kappa[i_sigma, i_temp, :, :, i] += vals
factor = self._conversion_factor * Kb * t ** 2
self._mode_kappa[i_sigma, i_temp] *= factor
def _set_mode_kappa_from_mfp(self,
weights,
num_grid_points,
rotations_cartesian,
i_sigma,
i_temp):
for i, (v_gp, mfp_gp, cv_gp) in enumerate(
zip(self._gv, self._mfp[i_sigma, i_temp], self._cv[i_temp])):
for j, (v, mfp, cv) in enumerate(zip(v_gp, mfp_gp, cv_gp)):
sum_k = np.zeros((3, 3), dtype='double')
for r in rotations_cartesian:
sum_k += np.outer(np.dot(r, v), np.dot(r, mfp))
sum_k = (sum_k + sum_k.T) / 2 * cv * weights[i] ** 2 * 2 * np.pi
for k, vxf in enumerate(
((0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1))):
self._mode_kappa[i_sigma, i_temp, i, j, k] = sum_k[vxf]
self._mode_kappa *= - self._conversion_factor
def _set_mean_free_path(self, i_sigma, i_temp, weights, Y):
t = self._temperatures[i_temp]
# shape = (num_grid_points, num_band, 3),
for i, f_gp in enumerate(self._f_vectors):
for j, f in enumerate(f_gp):
cv = self._cv[i_temp, i, j]
if cv < 1e-10:
continue
self._mfp[i_sigma, i_temp, i, j] = (
- 2 * t * np.sqrt(Kb / cv) * f / (2 * np.pi))
def _show_log(self, i):
gp = self._grid_points[i]
frequencies = self._frequencies[gp]
if self._is_reducible_collision_matrix:
gv = self._gv[gp]
else:
gv = self._gv[i]
if self._is_full_pp:
ave_pp = self._averaged_pp_interaction[i]
text = "Frequency group velocity (x, y, z) |gv| Pqj"
else:
text = "Frequency group velocity (x, y, z) |gv|"
if self._gv_delta_q is None:
pass
else:
text += " (dq=%3.1e)" % self._gv_delta_q
print(text)
if self._is_full_pp:
for f, v, pp in zip(frequencies, gv, ave_pp):
print("%8.3f (%8.3f %8.3f %8.3f) %8.3f %11.3e" %
(f, v[0], v[1], v[2], np.linalg.norm(v), pp))
else:
for f, v in zip(frequencies, gv):
print("%8.3f (%8.3f %8.3f %8.3f) %8.3f" %
(f, v[0], v[1], v[2], np.linalg.norm(v)))
sys.stdout.flush()
def _py_symmetrize_collision_matrix(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(3):
for l in range(num_ir_grid_points):
for m in range(num_band):
for n in range(3):
self._py_set_symmetrized_element(
i, j, k, l, m, n)
def _py_set_symmetrized_element(self, i, j, k, l, m, n):
sym_val = (self._collision_matrix[:, :, i, j, k, l, m, n] +
self._collision_matrix[:, :, l, m, n, i, j, k]) / 2
self._collision_matrix[:, :, i, j, k, l, m, n] = sym_val
self._collision_matrix[:, :, l, m, n, i, j, k] = sym_val
def _py_symmetrize_collision_matrix_no_kappa_stars(self):
num_band = self._primitive.get_number_of_atoms() * 3
num_ir_grid_points = len(self._ir_grid_points)
for i in range(num_ir_grid_points):
for j in range(num_band):
for k in range(num_ir_grid_points):
for l in range(num_band):
self._py_set_symmetrized_element_no_kappa_stars(
i, j, k, l)
def _py_set_symmetrized_element_no_kappa_stars(self, i, j, k, l):
sym_val = (self._collision_matrix[:, :, i, j, k, l] +
self._collision_matrix[:, :, k, l, i, j]) / 2
self._collision_matrix[:, :, i, j, k, l] = sym_val
self._collision_matrix[:, :, k, l, i, j] = sym_val
|
atztogo/phono3py
|
phono3py/phonon3/conductivity_LBTE.py
|
Python
|
bsd-3-clause
| 70,974
|
[
"phonopy"
] |
e5825704eec975a50e045eaed95084012aa77e488b5000ac6eb562f7129d40cf
|
#################################################################
# $HeadURL$
#################################################################
"""
.. module:: Pfn
:synopsis: ProcessPool and related classes
ProcessPool
ProcessPool creates a pool of worker subprocesses to handle a queue of tasks
much like the producers/consumers paradigm. Users just need to fill the queue
with tasks to be executed and worker tasks will execute them.
To construct ProcessPool one first should call its constructor::
pool = ProcessPool( minSize, maxSize, maxQueuedRequests )
where parameters are:
:param int minSize: at least <minSize> workers will be alive all the time
:param int maxSize: no more than <maxSize> workers will be alive all the time
:param int maxQueuedRequests: size for request waiting in a queue to be executed
In case another request is added to the full queue, the execution will
lock until another request is taken out. The ProcessPool will automatically increase and
decrease the pool of workers as needed, of course not exceeding above limits.
To add a task to the queue one should execute:::
pool.createAndQueueTask( funcDef,
args = ( arg1, arg2, ... ),
kwargs = { "kwarg1" : value1, "kwarg2" : value2 },
callback = callbackDef,
exceptionCallback = exceptionCallBackDef )
or alternatively by using ProcessTask instance:::
task = ProcessTask( funcDef,
args = ( arg1, arg2, ... )
kwargs = { "kwarg1" : value1, .. },
callback = callbackDef,
exceptionCallback = exceptionCallbackDef )
pool.queueTask( task )
where parameters are:
:param funcDef: callable object definition (function, lambda, class with __call__ slot defined)
:param list args: argument list
:param dict kwargs: keyword arguments dictionary
:param callback: callback function definition
:param exceptionCallback: exception callback function definition
The callback, exceptionCallback and the parameters are all optional. Once a task has been added to the pool,
it will be executed as soon as possible. Worker subprocesses automatically return the return value of the task.
To obtain those results one has to execute::
pool.processRequests()
This method will process the existing return values of the task, even if the task does not return
anything. This method has to be called to clean the result queues. To wait until all the requests are finished
and process their result call::
pool.processAllRequests()
This function will block until all requests are finished and their result values have been processed.
It is also possible to set the ProcessPool in daemon mode, in which all results are automatically
processed as soon as they are available, just after finalization of task execution. To enable this mode one
has to call::
pool.daemonize()
Callback functions
There are two types of callbacks that can be executed for each task: an exception callback function and
a results callback function. The first one is executed when an unhandled exception has been raised during
task processing, and hence no task results are available; otherwise the second callback type
is executed.
The callbacks can be attached in two places:
- directly in ProcessTask, in that case those have to be shelvable/picklable, so they should be defined as
global functions with the signature :callback( task, taskResult ): where :task: is a :ProcessTask:
reference and :taskResult: is whatever task callable it returning for results callback and
:exceptionCallback( task, exc_info): where exc_info is a
:S_ERROR( "Exception": { "Value" : exceptionName, "Exc_info" : exceptionInfo ):
- in ProcessPool, in that case there is no limitation on the function type, except the signature, which
should follow :callback( task ): or :exceptionCallback( task ):, as those callbacks definitions
are not put into the queues
The first types of callbacks could be used in case various callable objects are put into the ProcessPool,
so you probably want to handle them differently depending on their results, while the second types are for
executing same type of callables in subprocesses and hence you are expecting the same type of results
everywhere.
"""
__RCSID__ = "$Id$"
import multiprocessing
import sys
import time
import threading
import os
import signal
import Queue
from types import FunctionType, TypeType, ClassType
try:
from DIRAC.FrameworkSystem.Client.Logger import gLogger
except ImportError:
gLogger = None
try:
from DIRAC.Core.Utilities.LockRing import LockRing
except ImportError:
LockRing = None
try:
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
except ImportError:
def S_OK( val = "" ):
""" dummy S_OK """
return { 'OK' : True, 'Value' : val }
def S_ERROR( mess ):
""" dummy S_ERROR """
return { 'OK' : False, 'Message' : mess }
class WorkingProcess( multiprocessing.Process ):
"""
.. class:: WorkingProcess
WorkingProcess is a class that represents activity that runs in a separate process.
It runs its main thread (process) in daemon mode, reading tasks from :pendingQueue:, executing
them and pushing tasks with results back to the :resultsQueue:. If the task has a timeout value
defined, a separate threading.Timer thread is started, killing execution (and destroying the worker)
after :ProcessTask.__timeOut: seconds.
Main execution could also terminate in a few different ways:
* on every failed read attempt (from an empty :pendingQueue:), the idle loop counter is increased;
the worker is terminated when the counter reaches a value of 10;
* when stopEvent is set (so ProcessPool is in draining mode),
* when parent process PID is set to 1 (init process, parent process with ProcessPool is dead).
"""
def __init__( self, pendingQueue, resultsQueue, stopEvent, keepRunning ):
""" c'tor
:param self: self reference
:param pendingQueue: queue storing ProcessTask before execution
:type pendingQueue: multiprocessing.Queue
:param resultsQueue: queue storing callbacks and exceptionCallbacks
:type resultsQueue: multiprocessing.Queue
:param stopEvent: event to stop processing
:type stopEvent: multiprocessing.Event
:param keepRunning: when True, the worker keeps running until the stop event instead of exiting after idle loops
"""
multiprocessing.Process.__init__( self )
## daemonize
self.daemon = True
## flag to see if task is being treated
self.__working = multiprocessing.Value( 'i', 0 )
## task counter
self.__taskCounter = multiprocessing.Value( 'i', 0 )
## task queue
self.__pendingQueue = pendingQueue
## results queue
self.__resultsQueue = resultsQueue
## stop event
self.__stopEvent = stopEvent
## keep process running until stop event
self.__keepRunning = keepRunning
## placeholder for watchdog thread
self.__watchdogThread = None
## placeholder for process thread
self.__processThread = None
## placeholder for current task
self.task = None
## start yourself at least
self.start()
def __watchdog( self ):
"""
Watchdog thread target
Terminating/killing WorkingProcess when parent process is dead
:param self: self reference
"""
while True:
## parent is dead, commit suicide
if os.getppid() == 1:
os.kill( self.pid, signal.SIGTERM )
## wait for half a minute and if worker is still alive use REAL silencer
time.sleep(30)
## now you're dead
os.kill( self.pid, signal.SIGKILL )
## wake me up in 5 seconds
time.sleep(5)
def isWorking( self ):
"""
Check if process is being executed
:param self: self reference
"""
return self.__working.value == 1
def taskProcessed( self ):
"""
Tell how many tasks have been processed so far
:param self: self reference
"""
return self.__taskCounter
def __processTask( self ):
"""
processThread target
:param self: self reference
"""
if self.task:
self.task.process()
def run( self ):
"""
Task execution
Reads and executes ProcessTask :task: out of pending queue and then pushes it
to the results queue for callback execution.
:param self: self reference
"""
## start watchdog thread
self.__watchdogThread = threading.Thread( target = self.__watchdog )
self.__watchdogThread.daemon = True
self.__watchdogThread.start()
## http://cdn.memegenerator.net/instances/400x/19450565.jpg
if LockRing:
# Reset all locks
lr = LockRing()
lr._openAll()
lr._setAllEvents()
## zero processed task counter
taskCounter = 0
## zero idle loop counter
idleLoopCount = 0
## main loop
while True:
## draining, stopEvent is set, exiting
if self.__stopEvent.is_set():
return
## clear task
self.task = None
## read from queue
try:
task = self.__pendingQueue.get( block = True, timeout = 10 )
except Queue.Empty:
## idle loop?
idleLoopCount += 1
## 10th idle loop - exit, nothing to do
if idleLoopCount == 10 and not self.__keepRunning:
return
continue
## toggle __working flag
self.__working.value = 1
## save task
self.task = task
## reset idle loop counter
idleLoopCount = 0
## process task in a separate thread
self.__processThread = threading.Thread( target = self.__processTask )
self.__processThread.start()
timeout = False
noResults = False
## join processThread with or without timeout
if self.task.getTimeOut():
self.__processThread.join( self.task.getTimeOut()+10 )
else:
self.__processThread.join()
## processThread is still alive? stop it!
if self.__processThread.is_alive():
self.__processThread._Thread__stop()
self.task.setResult( S_ERROR("Timed out") )
timeout = True
# if the task finished with no results, something bad happened, e.g.
# undetected timeout
if not self.task.taskResults() and not self.task.taskException():
self.task.setResult( S_ERROR("Task produced no results") )
noResults = True
## check results and callbacks presence, put task to results queue
if self.task.hasCallback() or self.task.hasPoolCallback():
self.__resultsQueue.put( task )
if timeout or noResults:
# The task execution timed out, stop the process to prevent it from running
# in the background
time.sleep( 1 )
os.kill( self.pid, signal.SIGKILL )
return
## increase task counter
taskCounter += 1
self.__taskCounter = taskCounter
## toggle __working flag
self.__working.value = 0
class ProcessTask( object ):
""" Defines task to be executed in WorkingProcess together with its callbacks.
"""
## taskID
taskID = 0
def __init__( self,
taskFunction,
args = None,
kwargs = None,
taskID = None,
callback = None,
exceptionCallback = None,
usePoolCallbacks = False,
timeOut = 0 ):
""" c'tor
:warning: taskFunction has to be callable: it could be a function, lambda OR a class with
__call__ operator defined. But be carefull with interpretation of args and kwargs, as they
are passed to different places in above cases:
1. for functions or lambdas args and kwargs are just treated as function parameters
2. for callable classess (say MyTask) args and kwargs are passed to class contructor
(MyTask.__init__) and MyTask.__call__ should be a method without parameters, i.e.
MyTask definition should be::
class MyTask:
def __init__( self, *args, **kwargs ):
...
def __call__( self ):
...
:warning: depending on :timeOut: value, taskFunction execution can be forcefully terminated
using SIGALRM after :timeOut: seconds spent; a :timeOut: equal to zero means there is no
time out at all, except those during :ProcessPool: finalization
:param self: self reference
:param mixed taskFunction: definition of callable object to be executed in this task
:param tuple args: non-keyword arguments
:param dict kwargs: keyword arguments
:param int taskID: task id, if not set,
:param int timeOut: estimated time to execute taskFunction in seconds (default = 0, no timeOut at all)
:param mixed callback: result callback function
:param mixed exceptionCallback: callback function to be fired upon exception in taskFunction
"""
self.__taskFunction = taskFunction
self.__taskArgs = args or []
self.__taskKwArgs = kwargs or {}
self.__taskID = taskID
self.__resultCallback = callback
self.__exceptionCallback = exceptionCallback
self.__timeOut = 0
## set time out
self.setTimeOut( timeOut )
self.__done = False
self.__exceptionRaised = False
self.__taskException = None
self.__taskResult = None
self.__usePoolCallbacks = usePoolCallbacks
def taskResults( self ):
"""
Get task results
:param self: self reference
"""
return self.__taskResult
def taskException( self ):
"""
Get task exception
:param self: self reference
"""
return self.__taskException
def enablePoolCallbacks( self ):
"""
(re)enable use of ProcessPool callbacks
"""
self.__usePoolCallbacks = True
def disablePoolCallbacks( self ):
"""
Disable execution of ProcessPool callbacks
"""
self.__usePoolCallbacks = False
def usePoolCallbacks( self ):
"""
Check if results should be processed by callbacks defined in the :ProcessPool:
:param self: self reference
"""
return self.__usePoolCallbacks
def hasPoolCallback( self ):
"""
Check if asked to execute :ProcessPool: callbacks
:param self: self reference
"""
return self.__usePoolCallbacks
def setTimeOut( self, timeOut ):
"""
Set time out (in seconds)
:param self: self reference
:param int timeOut: new time out value
"""
try:
self.__timeOut = int( timeOut )
return S_OK( self.__timeOut )
except (TypeError, ValueError), error:
return S_ERROR( str(error) )
def getTimeOut( self ):
"""
Get timeOut value
:param self: self reference
"""
return self.__timeOut
def hasTimeOutSet( self ):
"""
Check if timeout is set
:param self: self reference
"""
return bool( self.__timeOut != 0 )
def getTaskID( self ):
"""
TaskID getter
:param self: self reference
"""
return self.__taskID
def hasCallback( self ):
"""
Callback existence checking
:param self: self reference
:return: True if callback or exceptionCallback has been defined, False otherwise
"""
return self.__resultCallback or self.__exceptionCallback or self.__usePoolCallbacks
def exceptionRaised( self ):
"""
Flag to determine exception in process
:param self: self reference
"""
return self.__exceptionRaised
def doExceptionCallback( self ):
"""
Execute exceptionCallback
:param self: self reference
"""
if self.__done and self.__exceptionRaised and self.__exceptionCallback:
self.__exceptionCallback( self, self.__taskException )
def doCallback( self ):
"""
Execute result callback function
:param self: self reference
"""
if self.__done and not self.__exceptionRaised and self.__resultCallback:
self.__resultCallback( self, self.__taskResult )
def setResult( self, result ):
"""
Set taskResult to result
"""
self.__taskResult = result
def process( self ):
"""
Execute task
:param self: self reference
"""
self.__done = True
try:
## it's a function?
if type( self.__taskFunction ) is FunctionType:
self.__taskResult = self.__taskFunction( *self.__taskArgs, **self.__taskKwArgs )
## or a class?
elif type( self.__taskFunction ) in ( TypeType, ClassType ):
## create new instance
taskObj = self.__taskFunction( *self.__taskArgs, **self.__taskKwArgs )
### check if it is callable, raise TypeError if not
if not callable( taskObj ):
raise TypeError( "__call__ operator not defined not in %s class" % taskObj.__class__.__name__ )
### call it at least
self.__taskResult = taskObj()
except Exception, x:
self.__exceptionRaised = True
if gLogger:
gLogger.exception( "Exception in process of pool" )
if self.__exceptionCallback or self.usePoolCallbacks():
retDict = S_ERROR( 'Exception' )
retDict['Value'] = str( x )
retDict['Exc_info'] = sys.exc_info()[1]
self.__taskException = retDict
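# Illustration (not part of the original module): a minimal, picklable task
# and result callback following the conventions documented above. The names
# doubler and resultCallback are hypothetical, used only for this sketch.
#
#     def doubler( x ):
#         return 2 * x
#
#     def resultCallback( task, taskResult ):
#         print "task %s returned %s" % ( task.getTaskID(), taskResult )
#
#     task = ProcessTask( doubler, args = ( 21, ), taskID = 1,
#                         callback = resultCallback, timeOut = 10 )
#
# Both functions must be defined at module level so that the task can be
# pickled when it travels through the multiprocessing queues.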
class ProcessPool( object ):
"""
.. class:: ProcessPool
ProcessPool
This class is managing multiprocessing execution of tasks (:ProcessTask: instances) in a separate
sub-processes (:WorkingProcess:).
Pool depth
The :ProcessPool: keeps the required number of active workers all the time: slave workers are only created
when pendingQueue is being filled with tasks, not exceeding the defined min and max limits. When pendingQueue is
empty, active workers will clean themselves up, as each worker has got a built-in
self-destroy mechanism after 10 idle loops.
Processing and communication
The communication between :ProcessPool: instance and slaves is performed using two :multiprocessing.Queues:
* pendingQueue, used to push tasks to the workers,
* resultsQueue for revert direction;
and one :multiprocessing.Event: instance (stopEvent), which is working as a fuse to destroy idle workers
in a clean manner.
Processing of task begins with pushing it into :pendingQueue: using :ProcessPool.queueTask: or
:ProcessPool.createAndQueueTask:. Every time a new task is queued, :ProcessPool: checks the existence of
active and idle workers and spawns new ones when required. The task is then read and processed on the worker
side. If results are ready and callback functions are defined, task is put back to the resultsQueue and it is
ready to be picked up by ProcessPool again. To perform this last step one has to call :ProcessPool.processResults:,
or alternatively ask for daemon mode processing, when this function is called again and again in
separate background thread.
Finalisation
Finalization for task processing is done in several steps:
* if pool is working in daemon mode, background result processing thread is joined and stopped
* :pendingQueue: is emptied by :ProcessPool.processAllResults: function, all enqueued tasks are executed
* :stopEvent: is set, so all idle workers are exiting immediately
* non-hanging workers are joined and terminated politely
* the rest of the workers, if any, are forcefully terminated by signals: first by SIGTERM, and if that doesn't work,
by SIGKILL
:warn: Be careful and choose the :timeout: argument to :ProcessPool.finalize: wisely. Too short a time period can
cause all workers to be killed.
"""
def __init__( self, minSize = 2, maxSize = 0, maxQueuedRequests = 10,
strictLimits = True, poolCallback=None, poolExceptionCallback=None,
keepProcessesRunning=True ):
""" c'tor
:param self: self reference
:param int minSize: minimal number of simultaneously executed tasks
:param int maxSize: maximal number of simultaneously executed tasks
:param int maxQueuedRequests: size of pending tasks queue
:param bool strictLimits: flag controlling worker overcommitment
:param callable poolCallback: results callback
:param callable poolExceptionCallback: exception callback
:param bool keepProcessesRunning: keep workers alive while the pool is running
"""
## min workers
self.__minSize = max( 1, minSize )
## max workers
self.__maxSize = max( self.__minSize, maxSize )
## queue size
self.__maxQueuedRequests = maxQueuedRequests
## flag to worker overcommit
self.__strictLimits = strictLimits
## pool results callback
self.__poolCallback = poolCallback
## pool exception callback
self.__poolExceptionCallback = poolExceptionCallback
## pending queue
self.__pendingQueue = multiprocessing.Queue( self.__maxQueuedRequests )
## results queue
self.__resultsQueue = multiprocessing.Queue( 0 )
## stop event
self.__stopEvent = multiprocessing.Event()
## keep processes running flag
self.__keepRunning = keepProcessesRunning
## lock
self.__prListLock = threading.Lock()
## workers dict
self.__workersDict = {}
## flag to trigger workers draining
self.__draining = False
## placeholder for daemon results processing
self.__daemonProcess = False
## create initial workers
self.__spawnNeededWorkingProcesses()
def stopProcessing( self, timeout=10 ):
"""
Case fire
:param self: self reference
"""
self.finalize( timeout )
def startProcessing( self ):
"""
Restart processing again
:param self: self reference
"""
self.__draining = False
self.__stopEvent.clear()
self.daemonize()
def setPoolCallback( self, callback ):
"""
Set ProcessPool callback function
:param self: self reference
:param callable callback: callback function
"""
if callable( callback ):
self.__poolCallback = callback
def setPoolExceptionCallback( self, exceptionCallback ):
"""
Set ProcessPool exception callback function
:param self: self reference
:param callable exceptionCallback: exception callback function
"""
if callable( exceptionCallback ):
self.__poolExceptionCallback = exceptionCallback
def getMaxSize( self ):
"""
MaxSize getter
:param self: self reference
"""
return self.__maxSize
def getMinSize( self ):
"""
MinSize getter
:param self: self reference
"""
return self.__minSize
def getNumWorkingProcesses( self ):
"""
Count processes currently being executed
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len( [ pid for pid, worker in self.__workersDict.items() if worker.isWorking() ] )
finally:
self.__prListLock.release()
return counter
def getNumIdleProcesses( self ):
"""
Count processes being idle
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len( [ pid for pid, worker in self.__workersDict.items() if not worker.isWorking() ] )
finally:
self.__prListLock.release()
return counter
def getFreeSlots( self ):
""" get number of free slots available for workers
:param self: self reference
"""
return max( 0, self.__maxSize - self.getNumWorkingProcesses() )
def __spawnWorkingProcess( self ):
"""
Create new process
:param self: self reference
"""
self.__prListLock.acquire()
try:
worker = WorkingProcess( self.__pendingQueue, self.__resultsQueue, self.__stopEvent, self.__keepRunning )
while worker.pid == None:
time.sleep(0.1)
self.__workersDict[ worker.pid ] = worker
finally:
self.__prListLock.release()
def __cleanDeadProcesses( self ):
"""
Delete references of dead working processes from ProcessPool.__workersDict
"""
## check wounded processes
self.__prListLock.acquire()
try:
for pid, worker in self.__workersDict.items():
if not worker.is_alive():
del self.__workersDict[pid]
finally:
self.__prListLock.release()
def __spawnNeededWorkingProcesses( self ):
"""
Create N working processes (at least self.__minSize, but no more
than self.__maxSize)
:param self: self reference
"""
self.__cleanDeadProcesses()
## if we're draining do not spawn new workers
if self.__draining or self.__stopEvent.is_set():
return
while len( self.__workersDict ) < self.__minSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
while self.hasPendingTasks() and \
self.getNumIdleProcesses() == 0 and \
len( self.__workersDict ) < self.__maxSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
time.sleep( 0.1 )
def queueTask( self, task, blocking = True, usePoolCallbacks= False ):
"""
Enqueue new task into pending queue
:param self: self reference
:param ProcessTask task: new task to execute
:param bool blocking: flag to block if necessary and new empty slot is available (default = block)
:param bool usePoolCallbacks: flag to trigger execution of pool callbacks (default = don't execute)
"""
if not isinstance( task, ProcessTask ):
raise TypeError( "Tasks added to the process pool must be ProcessTask instances" )
if usePoolCallbacks and ( self.__poolCallback or self.__poolExceptionCallback ):
task.enablePoolCallbacks()
self.__prListLock.acquire()
try:
self.__pendingQueue.put( task, block = blocking )
except Queue.Full:
self.__prListLock.release()
return S_ERROR( "Queue is full" )
finally:
self.__prListLock.release()
self.__spawnNeededWorkingProcesses()
## throttle a bit to allow task state propagation
time.sleep( 0.1 )
return S_OK()
def createAndQueueTask( self,
taskFunction,
args = None,
kwargs = None,
taskID = None,
callback = None,
exceptionCallback = None,
blocking = True,
usePoolCallbacks = False,
timeOut = 0 ):
"""
Create new processTask and enqueue it in pending task queue
:param self: self reference
:param mixed taskFunction: callable object definition (FunctionType, LambdaType, callable class)
:param tuple args: non-keyword arguments passed to taskFunction c'tor
:param dict kwargs: keyword arguments passed to taskFunction c'tor
:param int taskID: task Id
:param mixed callback: callback handler, callable object executed after task's execution
:param mixed exceptionCallback: callback handler executed if testFunction had raised an exception
:param bool blocking: flag to block queue if necessary until free slot is available
:param bool usePoolCallbacks: fire execution of pool defined callbacks after task callbacks
:param int timeOut: time you want to spend executing :taskFunction:
"""
task = ProcessTask( taskFunction, args, kwargs, taskID, callback, exceptionCallback, usePoolCallbacks, timeOut )
return self.queueTask( task, blocking )
def hasPendingTasks( self ):
"""
Check if tasks are present in the pending queue
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return not self.__pendingQueue.empty()
def isFull( self ):
"""
Check if the pending queue is full
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return self.__pendingQueue.full()
def isWorking( self ):
"""
Check existence of working subprocesses
:param self: self reference
"""
return not self.__pendingQueue.empty() or self.getNumWorkingProcesses()
def processResults( self ):
"""
Execute tasks' callbacks removing them from results queue
:param self: self reference
"""
processed = 0
while True:
self.__cleanDeadProcesses()
if not self.__pendingQueue.empty():
self.__spawnNeededWorkingProcesses()
time.sleep( 0.1 )
if self.__resultsQueue.empty():
break
## get task
task = self.__resultsQueue.get()
## execute callbacks
try:
task.doExceptionCallback()
task.doCallback()
if task.usePoolCallbacks():
if self.__poolExceptionCallback and task.exceptionRaised():
self.__poolExceptionCallback( task.getTaskID(), task.taskException() )
if self.__poolCallback and task.taskResults():
self.__poolCallback( task.getTaskID(), task.taskResults() )
except Exception, error:
pass
processed += 1
return processed
def processAllResults( self, timeout=10 ):
"""
Process all enqueued tasks at once
:param self: self reference
"""
start = time.time()
while self.getNumWorkingProcesses() or not self.__pendingQueue.empty():
self.processResults()
time.sleep( 1 )
if time.time() - start > timeout:
break
self.processResults()
def finalize( self, timeout = 60 ):
"""
Drain pool, shutdown processing in more or less clean way
:param self: self reference
:param timeout: seconds to wait before killing
"""
## start drainig
self.__draining = True
## join deamon process
if self.__daemonProcess:
self.__daemonProcess.join( timeout )
## process all tasks
self.processAllResults( timeout )
## set stop event, all idle workers should be terminated
self.__stopEvent.set()
## join idle workers
start = time.time()
while self.__workersDict:
if timeout <= 0 or time.time() - start >= timeout:
break
time.sleep( 0.1 )
self.__cleanDeadProcesses()
## second clean up - join and terminate workers
for worker in self.__workersDict.values():
if worker.is_alive():
worker.terminate()
worker.join(5)
self.__cleanDeadProcesses()
## third clean up - kill'em all!!!
self.__filicide()
def __filicide( self ):
"""
Kill all workers, kill'em all!
:param self: self reference
"""
while self.__workersDict:
pid = self.__workersDict.keys().pop(0)
worker = self.__workersDict[pid]
if worker.is_alive():
os.kill( pid, signal.SIGKILL )
del self.__workersDict[pid]
def daemonize( self ):
"""
Make ProcessPool a finite being for opening and closing doors between
chambers.
Also just run it in a separate background thread to the death of
PID 0.
:param self: self reference
"""
if self.__daemonProcess:
return
self.__daemonProcess = threading.Thread( target = self.__backgroundProcess )
self.__daemonProcess.setDaemon( 1 )
self.__daemonProcess.start()
def __backgroundProcess( self ):
"""
Daemon thread target
:param self: self reference
"""
while True:
if self.__draining:
return
self.processResults()
time.sleep( 1 )
def __del__( self ):
"""
Delete slot
:param self: self reference
"""
self.finalize( timeout = 10 )
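# Illustration (not part of the original module): a hedged end-to-end sketch
# of driving the pool with the public methods defined above; the worker
# function "square" is hypothetical.
#
#     def square( x ):
#         return x * x
#
#     pool = ProcessPool( minSize = 2, maxSize = 4, maxQueuedRequests = 10 )
#     for i in range( 8 ):
#         pool.createAndQueueTask( square, args = ( i, ), taskID = i )
#     pool.processAllResults( timeout = 30 )
#     pool.finalize( timeout = 10 )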
|
vmendez/DIRAC
|
Core/Utilities/ProcessPool.py
|
Python
|
gpl-3.0
| 31,382
|
[
"DIRAC"
] |
65c1c5e109a3463da84c33f43cc6d1319e7b2cd9812fce7d49bf6cef38666a28
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from ansible import constants as C
ANSIBLE_COLOR=True
if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
else:
try:
import curses
curses.setupterm()
if curses.tigetnum('colors') < 0:
ANSIBLE_COLOR=False
except ImportError:
# curses library was not found
pass
except curses.error:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR=True
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0' ,
}
def stringc(text, color):
"""String in color."""
if ANSIBLE_COLOR:
return "\n".join([u"\033[%sm%s\033[0m" % (codeCodes[color], t) for t in text.split('\n')])
else:
return text
# --- end "pretty"
def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
s = u"%s=%-4s" % (lead, str(num))
if num != 0 and ANSIBLE_COLOR and color is not None:
s = stringc(s, color)
return s
def hostcolor(host, stats, color=True):
if ANSIBLE_COLOR and color:
if stats['failures'] != 0 or stats['unreachable'] != 0:
return u"%-37s" % stringc(host, C.COLOR_ERROR)
elif stats['changed'] != 0:
return u"%-37s" % stringc(host, C.COLOR_CHANGED)
else:
return u"%-37s" % stringc(host, C.COLOR_OK)
return u"%-26s" % host
|
camradal/ansible
|
lib/ansible/utils/color.py
|
Python
|
gpl-3.0
| 3,194
|
[
"Brian"
] |
e4e766e7c99cdcbbf4ee3a52eeb9ee59d0c8c40a215fc1cce4d7a6868d696d1a
|
import sys
import math
import time
from pymol import cmd
import os
# Point to Leap SDK
# leap_path = os.path.join()
# sys.path.append(os.environ["LEAPPATH"])
# Import Leap
import Leap
from Leap import Matrix, Vector, CircleGesture
class PymolListener(Leap.Listener):
def __init__(self, *args, **kwargs):
super(PymolListener, self).__init__(*args, **kwargs)
self.prev_frame = None
self.view_do_rotation = False
self.view_do_translation = False
self.mode = 'view' #this should be binary edit or view
self.controller = Leap.Controller()
self.controller.add_listener(self)
self.controller.set_policy(Leap.Controller.POLICY_BACKGROUND_FRAMES)
#self.controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
self.circom=1
def __del__(self):
self.controller.remove_listener(self)
super(PymolListener, self).__del__()
def on_init(self, controller):
print "Initialized"
def on_connect(self, controller):
print "Connected"
# Enable gestures
self.controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
self.controller.enable_gesture(Leap.Gesture.TYPE_SWIPE)
self.controller.config.set("Gesture.Swipe.MinVelocity",500)
self.controller.config.save()
def on_disconnect(self, controller):
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
frame = controller.frame()
#print self.view_do_rotation
if self.mode == 'view':
# Two hands and open hand on the leftmost should allow for rotation
if len(frame.hands) == 2 and frame.hands.leftmost.sphere_radius > 75:
self.view_do_rotation = True
# Two hands and closed hand on the leftmost should allow for translation
elif len(frame.hands) == 2 and frame.hands.leftmost.sphere_radius < 40:
self.view_do_translation = True
else:
self.view_do_rotation = False
self.view_do_translation = False
self.update_view(frame,self.view_do_rotation, self.view_do_translation)
self.prev_frame = frame
def update_view(self, frame, do_rotation, do_translation):
if not self.prev_frame:
return
#check what mode to set, also make directional in future
#currently disabled, does not match rest of the program
# if len(frame.hands) == 2:
# for gest in frame.gestures():
# if gest.type is Leap.Gesture.TYPE_SWIPE:
# if Leap.SwipeGesture(gest).direction.y > 0.5 and gest.duration_seconds > 0.15:
# time.sleep(0.3)
# if self.mode == 'view':
# self.mode = 'edit'
# cmd.bg_color("white")
# else:
# self.mode = 'view'
# cmd.bg_color("black")
# do_rotation = False
# do_translation = False
# print 'Changing mode to: ' + self.mode
# time.sleep(0.6)
# break
for gest in frame.gestures():
if gest.type is Leap.Gesture.TYPE_CIRCLE:
circle=Leap.CircleGesture(gest)
if circle.progress>=1.5:# and len(frame.hands)==1:
self.circom=0
if self.circom==0 and len(frame.gestures())==0:
self.circom=1
if len(frame.hands)==1:
cmd.center("all",0,1)
elif len(frame.hands)==2:
cmd.orient("all")
if frame.hands.rightmost.rotation_probability(self.prev_frame) > 0.1 and do_rotation == True:
#print 'rotating'
rotation_about_x = frame.hands.rightmost.rotation_angle(self.prev_frame,Vector.x_axis)
rotation_about_y = frame.hands.rightmost.rotation_angle(self.prev_frame,Vector.y_axis)
rotation_about_z = frame.hands.rightmost.rotation_angle(self.prev_frame,Vector.z_axis)
#print rotation_about_x, rotation_about_y, rotation_about_z
cmd.rotate('x',rotation_about_x*100)
cmd.rotate('y',rotation_about_y*100)
cmd.rotate('z',rotation_about_z*100)
#m = frame.hands.rightmost.rotation_matrix(self.prev_frame)
#print m
#m *= Matrix(Vector(*view[0:3]),
# Vector(*view[3:6]),
# Vector(*view[6:9]))
#view[:9] = m.to_array_3x3()
elif frame.hands.rightmost.translation_probability(self.prev_frame) > 0.1 and do_translation == True:
translation = frame.hands.rightmost.translation(self.prev_frame)
#print translation.to_float_array()
cmd.translate(translation.to_float_array())
#currently disabled
'''
elif self.mode == 'edit' and len(frame.hands) == 1:
if frame.hands[0].is_right:
translation = frame.hands[0].translation(self.prev_frame)
cmd.translate(translation.to_float_array(),'EcZapA.clean')
elif frame.hands[0].is_left:
translation = frame.hands[0].translation(self.prev_frame)
cmd.translate(translation.to_float_array(),'EcFtsZ_AB.clean')
'''
#experimental zoom function, enable it by removing the quotes
'''
view = list(cmd.get_view())
if frame.scale_probability(self.prev_frame) > 0.9 and len(frame.hands)==1:
s = frame.scale_factor(self.prev_frame)
delta_z = math.log(s) * 100.0
view[11] += delta_z
view[15] -= delta_z
view[16] -= delta_z
cmd.set_view(view)
'''
# if __name__ == '__main__':
# listener = PymolListener()
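# Illustration (not part of the original script): a hedged sketch of starting
# the listener from a running PyMOL session once the Leap SDK is importable;
# instantiating the class is enough, since __init__ registers it with the
# controller:
#
#     listener = PymolListener()
#
# With two hands over the device, an open leftmost hand (sphere_radius > 75)
# enables rotation and a closed leftmost hand (sphere_radius < 40) enables
# translation, both driven by the rightmost hand.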
|
lqtza/OcuMOL_Leap
|
ocumol/src/hands/leap_only.py
|
Python
|
apache-2.0
| 5,877
|
[
"PyMOL"
] |
6d280af82479218597e90062db601b56e204f59128f2cdafb8f1c2960731f2f6
|
# -*- coding: utf-8 -*-
"""An implementation of Reverse Causal Reasoning (RCR) described by [Catlett2013]_.
.. [Catlett2013] Catlett, N. L., *et al* (2013). `Reverse causal reasoning: applying qualitative causal knowledge to
the interpretation of high-throughput data <https://doi.org/10.1186/1471-2105-14-340>`_.
BMC Bioinformatics, 14(1), 340.
"""
from collections import defaultdict
import pandas
import scipy.stats
from scipy.special import binom
from pybel import BELGraph
from pybel.constants import CAUSAL_DECREASE_RELATIONS, CAUSAL_INCREASE_RELATIONS, RELATION
__all__ = [
'run_rcr',
]
def _point_probability(k, n, l, p: float = 0.5): # noqa:E741
return binom(n - l, k) * p ** k * (1 - p) ** (n - k - l)
def _concordance(k: int, n: int, m: int, l, p: float = 0.5): # noqa:E741
return sum(
_point_probability(j, n, l, p)
for j in range(k, min(n - 1, m))
)
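# Illustration (not part of the original module): _point_probability(k, n, l)
# is the binomial term binom(n - l, k) * p**k * (1 - p)**(n - k - l), and
# _concordance sums it for j in range(k, min(n - 1, m)). A small numeric
# check with l = 0 and p = 0.5, where it reduces to a plain binomial law:
#
#     _point_probability(2, 4, 0)   # binom(4, 2) * 0.5**4 = 0.375
#     _concordance(2, 5, 4, 0)      # j = 2, 3 -> 0.3125 + 0.3125 = 0.625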
def run_rcr(graph: BELGraph, tag: str = 'dgxp'):
"""Run the reverse causal reasoning algorithm on a graph.
Steps:
1. Get all downstream controlled things into map (that have at least 4 downstream things)
2. calculate population of all things that are downstream controlled
.. note:: Assumes all nodes have been pre-tagged with data
:param graph: A BEL graph
:param tag: The key for the nodes' data dictionaries that corresponds to the integer value for its differential
expression.
"""
# Step 1: Calculate the hypothesis subnetworks (just simple star graphs)
hypotheses = defaultdict(set)
increases = defaultdict(set)
decreases = defaultdict(set)
for u, v, d in graph.edges(data=True):
hypotheses[u].add(v)
if d[RELATION] in CAUSAL_INCREASE_RELATIONS:
increases[u].add(v)
elif d[RELATION] in CAUSAL_DECREASE_RELATIONS:
decreases[u].add(v)
# Step 2: Calculate the matching of the data points to the causal relationships
#: A dictionary from {tuple controller node: int count of correctly matching observations}
correct = defaultdict(int)
#: A dictionary from {tuple controller node: int count of incorrectly matching observations}
contra = defaultdict(int)
#: A dictionary from {tuple controller node: int count of ambiguous observations}
ambiguous = defaultdict(int)
#: A dictionary from {tuple controller node: int count of missing observations}
missing = defaultdict(int)
for controller, downstream_nodes in hypotheses.items():
if len(downstream_nodes) < 4:
continue # need enough data to make reasonable calculations!
for node in downstream_nodes:
if node in increases[controller] and node in decreases[controller]:
ambiguous[controller] += 1
elif node in increases[controller]:
if graph.nodes[node][tag] == 1:
correct[controller] += 1
elif graph.nodes[node][tag] == -1:
contra[controller] += 1
elif node in decreases[controller]:
if graph.nodes[node][tag] == 1:
contra[controller] += 1
elif graph.nodes[node][tag] == -1:
correct[controller] += 1
else:
missing[controller] += 1
# Step 3: Keep only controller nodes who have 4 or more downstream nodes
controllers = {
controller
for controller, downstream_nodes in hypotheses.items()
if 4 <= len(downstream_nodes)
}
# Step 4: Calculate concordance scores
concordance_scores = {
controller: scipy.stats.beta(0.5, correct[controller], contra[controller])
for controller in controllers
}
# Step 5: Calculate richness scores
# TODO
# Calculate the population as the union of all downstream nodes for all controllers
# population = {
# node
# for controller in controllers
# for node in hypotheses[controller]
# }
# population_size = len(population)
# Step 6: Export
return pandas.DataFrame({
'contra': contra,
'correct': correct,
'concordance': concordance_scores,
})
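# Illustration (not part of the original module): a hedged sketch of calling
# run_rcr on a BELGraph whose nodes were tagged beforehand with a +1 / -1
# differential-expression value under the default key 'dgxp':
#
#     for node in graph:
#         graph.nodes[node]['dgxp'] = 1      # or -1, from an expression experiment
#     df = run_rcr(graph)
#
# Only controllers with at least four causal downstream nodes contribute rows
# to the returned DataFrame.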
|
pybel/pybel-tools
|
src/pybel_tools/analysis/rcr.py
|
Python
|
mit
| 4,230
|
[
"Pybel"
] |
e55f00a8f214952cabf82a938d3485ce1c9b2d84b4d9f14d6fe8b32df35521e8
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from . import views
app_name="myaccounts"
urlpatterns = [
#url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^$', views.MainView.as_view(), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('myaccounts.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Logged In
url(r'^myaccounts/', include('myaccounts.accounts.urls', namespace='myaccounts')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these urls in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
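# Illustration (not part of the original file): with the named routes above,
# views and templates can reverse URLs by name (a hedged sketch using only
# names defined in this module):
#
#     from django.urls import reverse   # django.core.urlresolvers on old Django
#     reverse('home')    # -> '/'
#     reverse('about')   # -> '/about/'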
|
yohanswanepoel/django-openshift-js
|
config/urls.py
|
Python
|
mit
| 1,758
|
[
"VisIt"
] |
48856bf3865d929dc0c41d9264a3aaaf2097ed60eab48f85124e32eb7cbc2493
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac
from skimage.transform import AffineTransform
from skimage.measure.fit import _dynamic_max_trials
from skimage._shared._warnings import expected_warnings
def test_line_model_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3)))
def test_line_model_predict():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = ((0, 0), (1, 1))
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data = np.column_stack([x0, y0])
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
x = np.random.rand(100, 2)
assert_almost_equal(model0.predict(x), model_est.predict(x), 1)
def test_line_model_residuals():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0, 1]))
assert_equal(model.residuals(np.array([[0, 0]])), 0)
assert_equal(model.residuals(np.array([[0, 10]])), 0)
assert_equal(model.residuals(np.array([[10, 0]])), 10)
model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2))
assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2))
assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2))
def test_line_model_under_determined():
data = np.empty((1, 2))
assert_raises(ValueError, LineModelND().estimate, data)
def test_line_modelND_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1)))
def test_line_modelND_predict():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0.2, 0.98]))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_modelND_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = (np.array([0,0,0], dtype='float'),
np.array([1,1,1], dtype='float')/np.sqrt(3))
# we scale the unit vector with a factor 10 when generating points on the
# line in order to compensate for the scale of the random noise
data0 = (model0.params[0] +
10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters are correct
# we use the following geometric property: two aligned vectors have
# a cross-product equal to zero
# test if direction vectors are aligned
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1],
model_est.params[1])), 0, 1)
# test if origins are aligned with the direction
a = model_est.params[0] - model0.params[0]
if np.linalg.norm(a) > 0:
a /= np.linalg.norm(a)
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
def test_line_modelND_residuals():
model = LineModelND()
model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
def test_line_modelND_under_determined():
data = np.empty((1, 3))
assert_raises(ValueError, LineModelND().estimate, data)
def test_circle_model_invalid_input():
assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3)))
def test_circle_model_predict():
model = CircleModel()
r = 5
model.params = (0, 0, r)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
assert_almost_equal(xy, model.predict_xy(t))
def test_circle_model_estimate():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = CircleModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 1)
def test_circle_model_residuals():
model = CircleModel()
model.params = (0, 0, 5)
assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))),
np.sqrt(2 * 6**2) - 5)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
def test_ellipse_model_invalid_input():
assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3)))
def test_ellipse_model_predict():
model = EllipseModel()
r = 5
model.params = (0, 0, 5, 10, 0)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
# generate original data without noise
model0 = EllipseModel()
model0.params = (10, 20, 15, 25, 0)
t = np.linspace(0, 2 * np.pi, 100)
data0 = model0.predict_xy(t)
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = EllipseModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 0)
def test_ellipse_model_residuals():
model = EllipseModel()
# ellipse centered at the origin
model.params = (0, 0, 10, 5, 0)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
def test_ransac_shape():
np.random.seed(1)
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add some faulty data
outliers = (10, 30, 200)
data0[outliers[0], :] = (1000, 1000)
data0[outliers[1], :] = (-50, 50)
data0[outliers[2], :] = (-100, -10)
# estimate parameters of corrupted data
model_est, inliers = ransac(data0, CircleModel, 3, 5)
# test whether estimated parameters equal original parameters
assert_equal(model0.params, model_est.params)
for outlier in outliers:
assert outlier not in inliers
def test_ransac_geometric():
np.random.seed(1)
# generate original data without noise
src = 100 * np.random.random((50, 2))
model0 = AffineTransform(scale=(0.5, 0.3), rotation=1,
translation=(10, 20))
dst = model0(src)
# add some faulty data
outliers = (0, 5, 20)
dst[outliers[0]] = (10000, 10000)
dst[outliers[1]] = (-100, 100)
dst[outliers[2]] = (50, 50)
# estimate parameters of corrupted data
model_est, inliers = ransac((src, dst), AffineTransform, 2, 20)
# test whether estimated parameters equal original parameters
assert_almost_equal(model0.params, model_est.params)
assert np.all(np.nonzero(inliers == False)[0] == outliers)
def test_ransac_is_data_valid():
np.random.seed(1)
is_data_valid = lambda data: data.shape[0] > 2
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_data_valid=is_data_valid)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_is_model_valid():
np.random.seed(1)
def is_model_valid(model, data):
return False
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_model_valid=is_model_valid)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 5
assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 5, 1), np.inf)
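# A hedged sketch of the standard RANSAC trial-count formula behind the values
# above: N = log(1 - p) / log(1 - w**m), where w is the inlier ratio, m the
# minimal sample size and p the desired probability of drawing at least one
# outlier-free sample; e.g. w=0.5, m=8, p=0.99 gives ceil(1176.6) = 1177.
def _demo_ransac_max_trials(n_inliers=50, n_samples=100, min_samples=8,
                            probability=0.99):
    w = n_inliers / float(n_samples)
    nom = np.log(1 - probability)
    denom = np.log(1 - w ** min_samples)
    return int(np.ceil(nom / denom))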
def test_ransac_invalid_input():
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, max_trials=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=1.01)
def test_deprecated_params_attribute():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
with expected_warnings(['`_params`']):
assert_equal(model.params, model._params)
if __name__ == "__main__":
np.testing.run_module_suite()
|
jwiggins/scikit-image
|
skimage/measure/tests/test_fit.py
|
Python
|
bsd-3-clause
| 10,423
|
[
"Gaussian"
] |
836c22ac8fad27d89e5d549894480d69b840625914270b7a6cf025207a36d98c
|
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
class TestSCA(object):
def __init__(self, rest_res_filt, ref, standard, fwhm, seed_list, rest_mask2standard, premat, postmat, extraction_space):
"""
Initialize Inputs
Constructor call to initialize the inputs to sca_preproc workflow
Parameters
----------
seed_list : a list of existing nifti files
            A list of seeds/ROIs in MNI space.
extraction_space : (a string)
Options are 'mni' or 'native'.
            Extract the time series from the ROI in MNI space or in the subject's native space.
rest_res_filt : (an existing nifti file)
            Band-passed image with global signal, white matter, CSF and motion regression. Recommended bandpass filter: (0.001, 0.1).
(rest_res_bandpassed.nii.gz)
ref : (an existing nifti file)
            When registering from MNI space to native space, the mean functional image in native space (example_func.nii.gz) is used.
standard : (an existing nifti file)
            When registering from native space to MNI, MNI152_T1_STANDARD_RES.nii.gz is used (target space).
fwhm : (A list of floating point numbers)
            FWHM values used for spatially smoothing the Z-transformed correlations in MNI space.
Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.
Returns
-------
Nothing
"""
self.rest_res_filt = rest_res_filt
self.ref = ref
self.standard = standard
self.fwhm = fwhm
self.premat = premat
self.postmat = postmat
self.seed_list = seed_list
self.rest_mask2standard = rest_mask2standard
self.extraction_space = extraction_space
self.sca_preproc = self.setup_workflow()
def setup_workflow(self):
"""
Set up the workflow object by initializing it
Parameters
----------
self
Returns
-------
None
"""
self.sca_preproc = sca_preproc(self.extraction_space)
def teardown_workflow(self):
"""
Tear down the workflow by deleting the workflow object
Parameters
----------
self
Returns
-------
None
"""
del self.sca_preproc
def test_inputs(self):
"""
Test the workflow inputs
Parameters
----------
self
Returns
-------
Generates exceptions if any of the input tests fails
Notes
-----
seed_list :
            Verify that the input seeds lie inside the MNI brain (not clear how to verify this without user-supplied co-ordinates.
The ROI can be in native space but indicate MNI space in the header).
The input seed files are nifti files(.nii or .nii.gz).
All the seeds have the same resolution.
extraction_space :
            Must be a string, upper- or lower-case.
Options are 'mni' or 'native'
rest_res_filt :
            Test if the file is a nifti file. Since band-pass filtering can be turned on and off, we should decide if we need to test for temporal filtering.
            Test if the file is nuisance-signal regressed according to the nuisance signal corrections specified (this test would duplicate testing efforts
between the nuisance signal regression workflow tests)
ref :
Test if the file is nifti file.
Test the co-ordinate space of the image; Compute spatial correlation with the Standard MNI image in the given resolution.
If the spatial correlation is 1 then the reference file is verified to be Standard MNI image; return 1.
Otherwise compute correlation between ref image and example_func nifti image generated from functional preprocessing workflow.
If the spatial correlation is 1 or epsilon close to 1 then return 1 else return 0.
fwhm :
Test if this is a list of floating point numbers.
"""
assert False
def test_output(self):
"""
Run the sca_preproc workflow and test all the outputs
Parameters
----------
self
Returns
-------
generate exceptions for each outputs that fail the test
Notes
-----
correlations_verification : (a boolean value)
Reports if correlations command works as expected.
        z_trans_correlations_verification : (a boolean value)
Reports if Image normalization works as expected.
z_2standard_verification : (a boolean value)
Reports if registration to template space works as expected.
z_2standard_FWHM_verification : (a boolean value)
Reports if spatial smoothing on z_2standard image works as expected
Detailed_Description
- Verify Correlations :
On Real Data -
Extract mean TimeSeries using the ROI from rest_res_filt nifti image
Compute the map of Voxel wise correlations using nibabel and numpy
Compare the correlations calculated manually against the ones in the input Correlations image(correlations nifti image)
Since Comparison is between floating points keep a + - epsilon slack in comparison; epsilon ~= 0.005
Verify all values are between 0 & 1
If both these test evaluate to True then return True else Return False
On Simulation Data -
Not sure
- Verify Image Normalization :
On Real Data -
Use the voxel wise Correlation map from Correlations test to manually compute fisher-Z normalization scores
Compare the calculated correlation map values against the values from Z normalized input image
Check if the variance in the z scores is approx constant and that the correlation among values calculated in the prev two steps is epsilon closer to one
On Simulation Data -
Not sure
- Verify Image Registration :
On Real Data -
Compute the spatial correlation of the registered Image(z_2standard) with the reference nifti image (ref)
Compute percentage overlap between registered image and the reference image and
between the input image(z_trans_correlations) and the reference image
              The correlation values obtained when the registered image is used should be higher than when the input image is used (need to test).
If the spatial correlation and percentage overlaps are above their respective thresholds then return True else False
(TODO: Decide before hand on the values of thresholds, a way to do that could be an average of correlation values of subjects on a big dataset)
On Simulation Data -
Not sure
        - Verify Gaussian Kernel Smoothing :
On Real Data -
              Verify that the intensity distribution in the smoothed image is still centered at zero and has unit variance
              Load the z_2standard input image in python. Form the (2-D) Gaussian kernel matrix (TODO: determine the size of the Gaussian matrix)
              Perform convolution of the loaded input image and the Gaussian matrix to get the Gaussian-smoothed image
              Perform mean filtering (substitute the intensity of the voxel with the average intensity of the neighbouring voxels)
              Compare the image yielded from the previous step with the smoothed input image (z_2standard_FWHM)
if the values are +- epsilon away then return True else return False
On Simulation Data -
Not sure
"""
assert False
def test_warp_to_native(self):
"""
Checks if warping to native space works as expected
Set up the workflow object by initializing it
Parameters
----------
self
Returns
-------
generates an exception if warp_to_native_test fails
Notes
-----
On Real Data -
Compute the spatial correlation of the registered Image(outputs of the warp_to_native node) with the reference nifti image (ref)
Compute percentage overlap between registered image and the reference image and
between the input image(Seeds in MNI space) and the reference image
            The correlation values obtained when the registered image is used should be higher than when the input image is used (need to test).
On Synthetic Data -
- Not sure
"""
assert False
def test_time_series(self):
"""
checks if extraction of timeseries works as expected
Parameters
----------
self
Returns
-------
generates an exception if time_series_test fails
Notes
-----
On Real Data -
CASE_extraction_space_MNI: Extract the mean time series from the rest_res_filt in MNI space(do it for all the seeds in MNI space)
and find the correlation with the output of the timeseries node.
If the correlation is epsilon close to 1 then the data passes the test.
CASE_extraction_space_Native: Extract the mean time series from the rest_res_filt (warp all the seeds to Native space first)
and find the correlation with the output of the timeseries node.
If the correlation is epsilon close to 1 then the data passes the test.
On Simulation Data -
- Not sure
"""
assert False
def test_print_timeseries_to_file(self):
"""
        checks if outputting the timeseries list to a file works as expected
Parameters
----------
self
Returns
-------
generates an exception if print_timeseries_to_file_test fails
Notes
-----
On Real Data -
Read the timeseries from the timeseries file (output of print_timeseries_to_file node).
Correlate with output of timeseries node , The test is verified if the correlation is epsilon close to 1
On Simulation Data -
Not sure
"""
assert False
def test_warp_filt(self):
"""
checks if warping from functional space to MNI space works as expected
Parameters
----------
self
Returns
-------
generates an exception if warp_filt_test fails
Notes
-----
On Real Data -
Compute the spatial correlation of the registered Image(output of warp_filt node) with the reference nifti image (standard)
Compute percentage overlap between registered image and the reference image and
between the input image(rest_res_filt) and the reference image
            The correlation values obtained when the registered image is used should be higher than when the input image is used (need to test).
            Also check the level of noise introduced in the bandpassed frequency band. If the noise is significant, report bad registration (need to define what significant means).
On Simulation Data -
Not Sure
"""
assert False
def test_z_trans(self):
"""
checks if fisher Z transformation works as expected
Parameters
----------
self
Returns
-------
generates an exception if z_trans_test fails
Notes
-----
On Real Data -
            Check that the variance in z scores within a subject and between subjects stays approximately constant as the correlation values vary
On Simulation Data -
Not sure
"""
assert False
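    # A hedged illustration (not part of the C-PAC workflow) of the Fisher
    # z-transform this test refers to: z = arctanh(r) = 0.5 * ln((1 + r) / (1 - r)),
    # whose sampling variance is roughly 1 / (n - 3) independent of r -- the
    # "approximately constant variance" property being checked. Assumes numpy only.
    @staticmethod
    def _sketch_fisher_z(correlation_map, n_timepoints):
        import numpy as np
        r = np.clip(correlation_map, -0.999999, 0.999999)  # guard against r == +/-1
        z = np.arctanh(r)
        approx_variance = 1.0 / (n_timepoints - 3)
        return z, approx_variance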
def test_warp_to_standard(self):
"""
checks if warping from functional space to MNI space works as expected
Parameters
----------
self
Returns
-------
generates an exception if warp_to_standard_test fails
Notes
-----
On Real Data -
Compute the spatial correlation of the registered Image(output of warp_to_standard node) with the reference nifti image (standard)
Compute percentage overlap between registered image and the reference image and
between the (z_trans_correlations) and the reference image
            The correlation values obtained when the registered image is used should be higher than when the input image is used (need to test).
            Also verify that the variation in intensity values is approximately constant (does not shoot up as the intensity approaches 1 and vice versa as it approaches -1)
in a subject and between subjects
On Simulation Data -
Not Sure
"""
assert False
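    # A hedged sketch (file names are hypothetical, not workflow outputs) of the two
    # similarity measures described above: voxel-wise spatial correlation and a
    # percentage (Dice) overlap between binarised masks. Assumes a recent nibabel.
    @staticmethod
    def _sketch_registration_similarity(registered_path, reference_path):
        import nibabel as nib
        import numpy as np
        reg = nib.load(registered_path).get_fdata().ravel()
        ref = nib.load(reference_path).get_fdata().ravel()
        spatial_corr = np.corrcoef(reg, ref)[0, 1]
        reg_mask, ref_mask = reg > 0, ref > 0
        overlap = 2.0 * np.logical_and(reg_mask, ref_mask).sum() \
            / (reg_mask.sum() + ref_mask.sum())
        return spatial_corr, overlap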
def test_corr(self):
"""
tests if computing correlations works as expected
Parameters
----------
self
Returns
-------
generates an exception if corr_test fails
Notes
-----
On Real Data -
            Extract the mean TimeSeries using the ROI from the rest_res_filt nifti image (use the seed and nifti image in the appropriate co-ordinate space for extraction_space = 'mni' & 'native')
Compute the map of Voxel wise correlations using nibabel and numpy
Compare the correlations calculated manually against the ones in the input Correlations image(correlations nifti image)
Since Comparison is between floating points keep a + - epsilon slack in comparison; epsilon ~= 0.005
Verify all values are between 0 & 1
If both these test evaluate to True then return True else Return False
On Simulation Data -
Not sure
"""
assert False
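    # A hedged numpy-only sketch of the manual check described above: take the mean
    # seed time series from a 4-D functional array and correlate every voxel's time
    # series against it. Argument names are illustrative, not workflow outputs.
    @staticmethod
    def _sketch_voxelwise_seed_correlation(func_data, seed_mask):
        import numpy as np
        # func_data: (x, y, z, t) array; seed_mask: boolean (x, y, z) array
        seed_ts = func_data[seed_mask].mean(axis=0)              # (t,)
        voxels = func_data.reshape(-1, func_data.shape[-1])      # (n_voxels, t)
        voxels_c = voxels - voxels.mean(axis=1, keepdims=True)
        seed_c = seed_ts - seed_ts.mean()
        denom = np.sqrt((voxels_c ** 2).sum(axis=1) * (seed_c ** 2).sum())
        corr = voxels_c.dot(seed_c) / np.where(denom == 0, 1, denom)
        return corr.reshape(func_data.shape[:-1])                # (x, y, z)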
def test_smooth_mni(self):
"""
test if spatial smoothing works as expected
Parameters
----------
self
Returns
-------
generates an exception if smooth_mni_test fails
Notes
-----
On Real Data -
            Verify that the intensity distribution in the smoothed image is still centered at zero and has unit variance
            Load the z_2standard input image in python. Form the (2-D) Gaussian kernel matrix (TODO: determine the size of the Gaussian matrix)
            Perform convolution of the loaded input image and the Gaussian matrix to get the Gaussian-smoothed image
            Perform mean filtering (substitute the intensity of the voxel with the average intensity of the neighbouring voxels)
            Compare the image yielded from the previous step with the smoothed input image (z_2standard_FWHM)
if the values are +- epsilon away then return True else return False
On Simulation Data -
Not sure
"""
assert False
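    # A hedged sketch (scipy-based, not the workflow's FSL call) of the smoothing
    # check above: convert the FWHM in mm to a standard deviation in voxels using
    # FWHM = 2 * sqrt(2 * ln 2) * sigma, then apply a separable Gaussian filter.
    @staticmethod
    def _sketch_gaussian_smooth(volume, fwhm_mm, voxel_size_mm):
        import numpy as np
        from scipy.ndimage import gaussian_filter
        sigma_vox = fwhm_mm / (2.0 * np.sqrt(2.0 * np.log(2.0))) / voxel_size_mm
        return gaussian_filter(volume, sigma=sigma_vox)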
|
FCP-INDI/C-PAC
|
CPAC/sca/tests/test_sca.py
|
Python
|
bsd-3-clause
| 15,189
|
[
"Gaussian"
] |
21fd77aa2057c9471c358eb11a7d5b8f1f429a6401510a5a738dce72de509e61
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
Tags: CampaignService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.adwords.AdWordsErrors import AdWordsRequestError
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service with validate only flag enabled.
client.validate_only = True
ad_group_ad_service = client.GetAdGroupAdService(version='v201302')
# Construct operations to add a text ad.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
}
}
}]
ad_group_ad_service.Mutate(operations)
# No error means the request is valid.
# Now let's check an invalid ad using a very long line to trigger an error.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for all astronauts in orbit',
'headline': 'Luxury Cruise to Mars'
}
}
}]
try:
ad_group_ad_service.Mutate(operations)
except AdWordsRequestError, e:
print 'Validation correctly failed with \'%s\'.' % str(e)
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id)
|
donspaulding/adspygoogle
|
examples/adspygoogle/adwords/v201302/campaign_management/validate_text_ad.py
|
Python
|
apache-2.0
| 2,895
|
[
"VisIt"
] |
23ae2957f35cf716bedaa0c9cc1c120b9672f9aa910a32bccac8e650b0cd614f
|
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Functionality for SATIS typing atoms
"""
from rdkit import Chem
_debug = 0
#
# These are SMARTS patterns for the special cases used in
# SATIS typing.
#
aldehydePatt = Chem.MolFromSmarts('[CD2]=[OD1]')
ketonePatt = Chem.MolFromSmarts('[CD3]=[OD1]')
amidePatt = Chem.MolFromSmarts('[CD3](=[OD1])-[#7]')
esterPatt = Chem.MolFromSmarts('C(=[OD1])-O-[#6]')
carboxylatePatt = Chem.MolFromSmarts('C(=[OD1])-[OX1]')
carboxylPatt = Chem.MolFromSmarts('C(=[OD1])-[OX2]')
specialCases = ((carboxylatePatt,97),
(esterPatt,96),
(carboxylPatt,98),
(amidePatt,95),
(ketonePatt,94),
(aldehydePatt,93))
def SATISTypes(mol,neighborsToInclude=4):
""" returns SATIS codes for all atoms in a molecule
The SATIS definition used is from:
J. Chem. Inf. Comput. Sci. _39_ 751-757 (1999)
each SATIS code is a string consisting of _neighborsToInclude_ + 1
2 digit numbers
**Arguments**
- mol: a molecule
- neighborsToInclude (optional): the number of neighbors to include
in the SATIS codes
**Returns**
a list of strings nAtoms long
"""
global specialCases
nAtoms = mol.GetNumAtoms()
atomicNums = [0]*nAtoms
atoms = mol.GetAtoms()
for i in xrange(nAtoms):
atomicNums[i] = atoms[i].GetAtomicNum()
nSpecialCases = len(specialCases)
specialCaseMatches = [None]*nSpecialCases
for i,(patt,idx) in enumerate(specialCases):
if mol.HasSubstructMatch(patt):
specialCaseMatches[i] = mol.GetSubstructMatches(patt)
else:
specialCaseMatches[i] = ()
codes = [None]*nAtoms
for i in range(nAtoms):
code = [99]*(neighborsToInclude+1)
atom = atoms[i]
atomIdx = atom.GetIdx()
code[0] = min(atom.GetAtomicNum(),99)
bonds = atom.GetBonds()
nBonds = len(bonds)
otherIndices = [-1]*nBonds
if _debug: print code[0],
for j in range(nBonds):
otherIndices[j] = bonds[j].GetOtherAtom(atom).GetIdx()
if _debug: print otherIndices[j],
if _debug: print
otherNums = [atomicNums[x] for x in otherIndices] + \
[1]*atom.GetTotalNumHs()
otherNums.sort()
nOthers = len(otherNums)
if nOthers > neighborsToInclude:
otherNums.reverse()
otherNums = otherNums[:neighborsToInclude]
otherNums.reverse()
for j in range(neighborsToInclude):
code[j+1] = min(otherNums[j],99)
else:
for j in range(nOthers):
code[j+1] = min(otherNums[j],99)
if nOthers < neighborsToInclude and code[0] in [6,8]:
found = 0
for j in range(nSpecialCases):
for matchTuple in specialCaseMatches[j]:
if atomIdx in matchTuple:
code[-1] = specialCases[j][1]
found = 1
break
if found:
break
codes[i] = ''.join(['%02d'%(x) for x in code])
return codes
if __name__ == '__main__':
smis = ['CC(=O)NC','CP(F)(Cl)(Br)(O)',
'O=CC(=O)C','C(=O)OCC(=O)O','C(=O)[O-]']
for smi in smis:
print smi
m = Chem.MolFromSmiles(smi)
codes = SATISTypes(m)
print codes
|
rdkit/rdkit-orig
|
rdkit/Chem/SATIS.py
|
Python
|
bsd-3-clause
| 3,432
|
[
"RDKit"
] |
035b583fdfeefa167831c6bca50dc317dfe6ac5538a2e55e33a7769e5051b8e6
|
import numpy as np
def runge_kutta(t, efields, n_recorded, hamiltonian):
""" Evolves the hamiltonian in time using the runge_kutta method.
Parameters
----------
t : 1-D array of float
Time points, equally spaced array.
Shape T, number of timepoints
efields : ndarray <Complex>
        Time-dependent electric fields for all pulses.
        Shape M x T, where M is the number of electric fields and T is the number of time points.
n_recorded : int
Number of timesteps to record at the end of the simulation.
hamiltonian : Hamiltonian
The hamiltonian object which contains the inital conditions and the
function to use to obtain the matrices.
Returns
-------
ndarray : <Complex>
2-D array of recorded density vector elements for each time step in n_recorded.
"""
# can only call on n_recorded and t after efield_object.E is called
dt = np.abs(t[1] - t[0])
# extract attributes of the system
rho_emitted = np.empty((len(hamiltonian.recorded_indices), n_recorded), dtype=np.complex128)
# H has 3 dimensions: time and the 2 matrix dimensions
H = hamiltonian.matrix(efields, t)
# index to keep track of elements of rho_emitted
emitted_index = 0
rho_i = hamiltonian.rho.copy()
for k in range(len(t) - 1):
# calculate delta rho based on previous rho values
temp_delta_rho = np.dot(H[k], rho_i)
temp_rho_i = rho_i + temp_delta_rho * dt
# second order term
delta_rho = np.dot(H[k + 1], temp_rho_i)
rho_i += dt / 2 * (temp_delta_rho + delta_rho)
# if we are close enough to final coherence emission, start
# storing these values
if k >= len(t) - n_recorded:
for rec, native in enumerate(hamiltonian.recorded_indices):
rho_emitted[rec, emitted_index] = rho_i[native]
emitted_index += 1
# Last timestep
temp_delta_rho = np.dot(H[-1], rho_i)
rho_i += temp_delta_rho * dt
for rec, native in enumerate(hamiltonian.recorded_indices):
rho_emitted[rec, emitted_index] = rho_i[native]
return rho_emitted
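# A hedged scalar sketch (not used by WrightSim) of the same two-stage update as the
# loop above -- a Heun / second-order Runge-Kutta step -- applied to d(rho)/dt = h * rho
# with constant h, where the exact solution exp(h * t) is available for comparison.
def _demo_heun_scalar(h=-1j * 2 * np.pi * 0.01, dt=0.1, n_steps=1000):
    rho = 1.0 + 0.0j
    for _ in range(n_steps):
        k1 = h * rho                   # slope at the current point
        rho_predicted = rho + k1 * dt  # first-order (Euler) predictor
        k2 = h * rho_predicted         # slope at the predicted point
        rho = rho + dt / 2 * (k1 + k2)
    exact = np.exp(h * dt * n_steps)
    return rho, exact                  # agree to second order in dt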
muladd_cuda_source = """
/*
* muladd: Linear combination of two vectors with two scalar multiples
*
* computes a*b + c*d where a and c are vectors, b and d are scalars
* Values are stored in out.
* If out is the same as a or c, this is an in place operation.
* len defines the length to perform the computation.
* Generalizes to n-D array, given it is stored in contiguous memory.
*/
__device__ void muladd(pycuda::complex<double>* a, double b, pycuda::complex<double>* c, double d,
int len, pycuda::complex<double>* out)
{
for (int i=0; i<len; i++)
{
out[i] = a[i] * b + c[i] * d;
}
}
"""
dot_cuda_source = """
/*
* dot: Matrix multiplied by a vector.
*
* Expects a square NxN matrix and an N-length column vector.
* Values are written to out. DO NOT use in place, invalid results will be returned.
*
*/
__device__ void dot(pycuda::complex<double>* mat, pycuda::complex<double>* vec, int len,
pycuda::complex<double>* out)
{
for(int i=0; i<len; i++)
{
pycuda::complex<double> sum = pycuda::complex<double>();
for (int j=0; j<len; j++)
{
sum += vec[j] * mat[i * len + j];
}
out[i] = sum;
}
}
"""
pulse_cuda_source = """
#include <math.h>
/*
* calc_efield_params: convert efield params into appropriate values
*
* Converts FWHM to standard deviation
* Converts frequency into the rotating frame
* Converts area of peak to height
*
 * Performs the calculation on N contiguous sets of parameters, in place.
*
*/
__device__ void calc_efield_params(double* params, int n)
{
for(int i=0; i < n; i++)
{
// FWHM to sigma
params[1 + i*5] /= (2. * sqrt(log(2.)));
// Frequency to rotating frame
params[3 + i*5] *= 2 * M_PI * 3e-5;
// area -> y
params[0 + i*5] /= params[1 + i*5] * sqrt(2 * M_PI);
}
}
/*
* calc_efield: Convert parameters, phase matching, and time into an electric field
*
* Converts n electric fields at a time, places the complex electric
* field value into out, in contiguous fashion.
 * The length of the phase_matching array must be at least n.
*
*/
__device__ void calc_efield(double* params, int* phase_matching, double t, int n,
pycuda::complex<double>* out)
{
//TODO: ensure phase matching is done correctly for cases where
// it is not equal to +/- 1 (or 0, though why would you have 0)
// NISE took the sign, so far I have only taken the value
for(int i=0; i < n; i++)
{
// Complex phase and magnitude
out[i] = pycuda::exp(1. * I * ((double)(phase_matching[i]) *
(params[3 + i*5] * (t - params[2 + i*5]) + params[4 + i*5])));
// Gaussian envelope
out[i] *= params[0 + i*5] * exp(-1 * (t-params[2 + i*5]) * (t-params[2 + i*5])
/ 2. / params[1 + i*5] / params[1 + i*5]);
}
}
"""
runge_kutta_cuda_source = """
/*
* runge_kutta: Propagate electric fields over time using Runge-Kutta integration
*
* Parameters
* ----------
 * time_start: initial simulation time
 * time_end: final simulation time
 * dt: time step
 * nEFields: number of electric fields
 * *efparams: pointer to array of parameters for those electric fields
 * *phase_matching: array of phase matching conditions
 * n_recorded: number of output values to record
 * ham: Hamiltonian struct containing initial values, passed to matrix generator.
*
* Output:
* *out: array of recorded values. expects enough memory for n_recorded * ham.nRecorded
* complex values
*/
__device__
pycuda::complex<double>* runge_kutta(const double time_start, const double time_end, const double dt,
const int nEFields, double* efparams, int* phase_matching,
const int n_recorded, Hamiltonian ham,
pycuda::complex<double> *out)
{
// Allocate arrays and pointers for the Hamiltonians for the current and next step.
//pycuda::complex<double> *H_cur = (pycuda::complex<double>*)malloc(ham.nStates * ham.nStates * sizeof(pycuda::complex<double>));
//pycuda::complex<double> *H_next = (pycuda::complex<double>*)malloc(ham.nStates * ham.nStates * sizeof(pycuda::complex<double>));
//TODO: either figure out why dynamically allocated arrays weren't working, or use a #define to statically allocate
pycuda::complex<double> buf1[81];
pycuda::complex<double> buf2[81];
pycuda::complex<double>* H_cur = buf1;
pycuda::complex<double>* H_next = buf2;
// Track indices in arrays.
int out_index = 0;
int index=0;
// determine number of points.
int npoints = (int)((time_end-time_start)/dt);
// Allocate vectors used in computation.
pycuda::complex<double>* rho_i = (pycuda::complex<double>*)malloc(ham.nStates * sizeof(pycuda::complex<double>));
pycuda::complex<double>* temp_delta_rho = (pycuda::complex<double>*)malloc(ham.nStates * sizeof(pycuda::complex<double>));
pycuda::complex<double>* temp_rho_i = (pycuda::complex<double>*)malloc(ham.nStates * sizeof(pycuda::complex<double>));
pycuda::complex<double>* delta_rho = (pycuda::complex<double>*)malloc(ham.nStates * sizeof(pycuda::complex<double>));
pycuda::complex<double>* efields = (pycuda::complex<double>*)malloc(nEFields * sizeof(pycuda::complex<double>));
    // Initial rho vector.
    //TODO: Use the initial condition from the hamiltonian
rho_i[0] = 1.;
for(int i=1; i<ham.nStates; i++) rho_i[i] = 0.;
// Convert from given units to simulation units.
calc_efield_params(efparams, nEFields);
// Compute the first set of electric fields.
calc_efield(efparams, phase_matching, time_start, nEFields, efields);
    // Compute the initial matrix, stored in H_next, to be swapped
Hamiltonian_matrix(ham, efields, time_start, H_next);
for(double t = time_start; t < time_end; t += dt)
{
// Swap pointers to current and next hamiltonians
pycuda::complex<double>* temp = H_cur;
H_cur = H_next;
H_next = temp;
// First order
calc_efield(efparams, phase_matching, t+dt, nEFields, efields);
Hamiltonian_matrix(ham, efields, t+dt, H_next);
dot(H_cur, rho_i, ham.nStates, temp_delta_rho);
muladd(rho_i, 1., temp_delta_rho, dt, ham.nStates, temp_rho_i);
// Second order
dot(H_next, temp_rho_i, ham.nStates, delta_rho);
muladd(temp_delta_rho, 1., delta_rho, 1., ham.nStates, delta_rho);
muladd(rho_i, 1., delta_rho, dt/2., ham.nStates, rho_i);
// Record results if close enough to the end
if(index > npoints - n_recorded)
{
for(int i=0; i < ham.nRecorded; i++)
{
out[out_index + i * n_recorded] = rho_i[ham.recorded_indices[i]];
}
out_index++;
}
index++;
}
// Last point, only first order, recorded
dot(H_cur, rho_i, ham.nStates, temp_delta_rho);
muladd(rho_i, 1., temp_delta_rho, dt, ham.nStates, rho_i);
for(int i=0; i < ham.nRecorded; i++)
out[out_index + i * n_recorded] = rho_i[ham.recorded_indices[i]];
//free(H_next);
//free(H_cur);
free(rho_i);
free(temp_delta_rho);
free(temp_rho_i);
free(delta_rho);
free(efields);
return out;
}
"""
|
wright-group/WrightSim
|
WrightSim/mixed/propagate.py
|
Python
|
mit
| 9,642
|
[
"Gaussian"
] |
8708f55215b2eb7c60cb11e6012185cf1f2d1bee586bd2be7a8a39a024e1df50
|
"""
This example applies the ABX evaluation to 2D data sampled from Gaussian distributions (diagonal covariance)
"""
from __future__ import print_function
import abx_numpy
import numpy as np
def sample_data(parameters):
data = []
n_samples = []
for klass in parameters:
sample = np.empty((klass['N'], 2))
for i in range(2):
sample[:, i] = np.random.normal(klass['mean'][i],
klass['std'][i],
klass['N'])
data.append(sample)
n_samples.append(klass['N'])
classes = np.repeat(np.arange(len(parameters)), repeats=n_samples)
data = np.concatenate(data, axis=0)
return classes, data
def plot_data(parameters, data):
import matplotlib.pyplot as plt
assert len(parameters) <= 3, 'Cannot plot more than 3 classes'
i = 0
colors = ['r', 'g', 'b']
for n_klass, klass in enumerate(parameters):
plt.plot(*data[i:i+klass['N']].T, marker='o',
color=colors[n_klass], ls='',
label='class {}'.format(n_klass+1))
i += klass['N']
plt.legend(numpoints=1)
plt.title('Normally distributed data points (diagonal covariance)')
plt.show()
def evaluate():
parameters = [
{'mean': [1, 1], 'std': [0.5, 1], 'N': 100},
{'mean': [1, 3], 'std': [1, 1], 'N': 150},
{'mean': [3, 2], 'std': [0.5, 0.5], 'N': 200}
]
classes, data = sample_data(parameters)
plot_data(parameters, data)
results = abx_numpy.abx(classes, data, lambda x, y: np.linalg.norm(x - y))
print(results)
if __name__ == '__main__':
evaluate()
|
bootphon/abx_numpy
|
examples/2D_normal_example.py
|
Python
|
gpl-3.0
| 1,680
|
[
"Gaussian"
] |
72ed78c59a43fef859a47cf3f73ab8dbaee4b79c14f2f640b7a848dfa18c6240
|
import bpy
from io_scene_cs.utilities import rnaType, rnaOperator, B2CS, BoolProperty
from io_scene_cs.utilities import HasSetProperty, RemoveSetPropertySet
from io_scene_cs.utilities import RemovePanels, RestorePanels
class csFactoryPanel():
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "data"
b2cs_context = "data"
bl_label = ""
REMOVED = []
@classmethod
def poll(cls, context):
ob = bpy.context.active_object
r = (ob and ob.type == 'MESH' and ob.data)
if r:
csFactoryPanel.REMOVED = RemovePanels("data", ["DATA_PT_uv_texture", "DATA_PT_vertex_colors", "DATA_PT_vertex_groups"])
else:
RestorePanels(csFactoryPanel.REMOVED)
csFactoryPanel.REMOVED = []
return r
@rnaOperator
class MESH_OT_csFactory_RemoveProperty(bpy.types.Operator):
bl_idname = "csFactory_RemoveProperty"
bl_label = ""
def invoke(self, context, event):
ob = bpy.context.active_object.data
RemoveSetPropertySet(ob, self.properties.prop)
return('FINISHED',)
@rnaType
class MESH_PT_csFactory(csFactoryPanel, bpy.types.Panel):
bl_label = "Crystal Space Mesh Factory"
def LayoutAddProperty(self, row, ob, name):
split = row.split(percentage=0.5)
colL = split.column()
colR = split.column()
colL.prop(ob, name)
if not HasSetProperty(ob, name):
colR.label(text="(default: '%s')"%getattr(ob, name))
else:
d = colR.operator("csFactory_RemoveProperty", text="Default")
d.prop = name
def draw(self, context):
layout = self.layout
ob = bpy.context.active_object
if ob.type == 'MESH':
ob = bpy.context.active_object.data
row = layout.row()
row.prop(ob, "use_imposter")
row = layout.row()
row.prop(ob, "no_shadow_receive")
row = layout.row()
row.prop(ob, "no_shadow_cast")
row = layout.row()
row.prop(ob, "limited_shadow_cast")
BoolProperty(['Mesh'],
attr="use_imposter",
name="Imposter mesh",
description="Whether or not this mesh should use an imposter",
default=False)
BoolProperty(['Mesh'],
attr="no_shadow_receive",
name="No shadow receive",
description="Whether or not shadows can be cast on this mesh",
default=False)
BoolProperty(['Mesh'],
attr="no_shadow_cast",
name="No shadow cast",
description="Whether or not this mesh can cast shadows on other objects while in normal shadow casting mode",
default=False)
BoolProperty(['Mesh'],
attr="limited_shadow_cast",
name="Limited shadow cast",
description="Whether or not this mesh can cast shadows on other objects while in limited shadow casting mode",
default=False)
|
baoboa/Crystal-Space
|
scripts/blender/io_scene_cs/ui/data.py
|
Python
|
lgpl-2.1
| 2,734
|
[
"CRYSTAL"
] |
5bca0eba1cb3b03deff7d6dcd82ae9be2305925e4317e6ca9f1c4b3f93d50e9a
|
#!/usr/bin/env python
import os
from adaptivemd import Project
from adaptivemd import LocalResource
from adaptivemd import OpenMMEngine
from adaptivemd import PyEMMAAnalysis
from adaptivemd import File
from adaptivemd import WorkerScheduler
import mdtraj as md
if __name__ == '__main__':
Project.delete('example-simple-1')
project = Project('example-simple-1')
# --------------------------------------------------------------------------
# CREATE THE RESOURCE
# the instance to know about the place where we run simulations
# --------------------------------------------------------------------------
project.initialize(LocalResource())
# --------------------------------------------------------------------------
# CREATE THE ENGINE
# the instance to create trajectories
# --------------------------------------------------------------------------
pdb_file = File(
'file://../../examples/files/alanine/alanine.pdb').named('initial_pdb').load()
engine = OpenMMEngine(
pdb_file=pdb_file,
system_file=File('file://../../examples/files/alanine/system.xml').load(),
integrator_file=File('file://../../examples/files/alanine/integrator.xml').load(),
args='-r --report-interval 1 -p CPU --store-interval 1'
).named('openmm')
# --------------------------------------------------------------------------
# CREATE AN ANALYZER
# the instance that knows how to compute a msm from the trajectories
# --------------------------------------------------------------------------
modeller = PyEMMAAnalysis(
engine=engine
).named('pyemma')
project.generators.add(engine)
project.generators.add(modeller)
# --------------------------------------------------------------------------
# CREATE THE CLUSTER
# the instance that runs the simulations on the resource
# --------------------------------------------------------------------------
trajectory = project.new_trajectory(engine['pdb_file'], 100, engine)
task = engine.run(trajectory)
# project.queue(task)
pdb = md.load('../../examples/files/alanine/alanine.pdb')
cwd = os.getcwd()
# this part fakes a running worker without starting the worker process
worker = WorkerScheduler(project.resource)
worker.enter(project)
worker.submit(task)
assert(len(project.trajectories) == 0)
while not task.is_done():
worker.advance()
assert(len(project.trajectories) == 1)
traj_path = os.path.join(
worker.path,
'workers',
'worker.' + hex(task.__uuid__),
worker.replace_prefix(project.trajectories.one.url)
)
assert(os.path.exists(traj_path))
# go back to the place where we ran the test
traj = md.load(traj_path, top=pdb)
assert(len(traj) == 100)
# well, we have a 100 step trajectory which matches the size of the initial PDB
# that is a good sign
# extend the trajectory by 50
task2 = task.extend(50)
worker.submit(task2)
while not task2.is_done():
worker.advance()
# should still be one, since we have the same trajectory
assert(len(project.trajectories) == 1)
traj = md.load(traj_path, top=pdb)
assert (len(traj) == 150)
# after extension it is 150 frames. Excellent
project.close()
|
thempel/adaptivemd
|
adaptivemd/tests/test_simple.py
|
Python
|
lgpl-2.1
| 3,373
|
[
"MDTraj",
"OpenMM"
] |
f255d6a6fc9aefd3d8c48181de253bece2f5324bbc6003ca83f36902df0e951a
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
import unittest
import os
from pymatgen.io.vaspio.vasp_output import Vasprun
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath)
class ComputedEntryTest(unittest.TestCase):
def setUp(self):
self.entry = ComputedEntry(vasprun.final_structure.composition,
vasprun.final_energy,
parameters=vasprun.incar)
self.entry2 = ComputedEntry({"Fe": 2, "O": 3}, 2.3)
self.entry3 = ComputedEntry("Fe2O3", 2.3)
self.entry4 = ComputedEntry("Fe2O3", 2.3, entry_id=1)
def test_energy(self):
self.assertAlmostEqual(self.entry.energy, -269.38319884)
self.entry.correction = 1.0
self.assertAlmostEqual(self.entry.energy, -268.38319884)
self.assertAlmostEqual(self.entry3.energy_per_atom, 2.3 / 5)
def test_composition(self):
self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4")
self.assertEqual(self.entry2.composition.reduced_formula, "Fe2O3")
def test_to_from_dict(self):
d = self.entry.as_dict()
e = ComputedEntry.from_dict(d)
self.assertAlmostEqual(e.energy, -269.38319884)
def test_entry_id(self):
self.assertEqual(self.entry4.entry_id, 1)
self.assertEqual(self.entry2.entry_id, None)
def test_str(self):
self.assertIsNotNone(str(self.entry))
class ComputedStructureEntryTest(unittest.TestCase):
def setUp(self):
self.entry = ComputedStructureEntry(vasprun.final_structure,
vasprun.final_energy,
parameters=vasprun.incar)
def test_energy(self):
self.assertAlmostEqual(self.entry.energy, -269.38319884)
self.entry.correction = 1.0
self.assertAlmostEqual(self.entry.energy, -268.38319884)
def test_composition(self):
self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4")
def test_to_from_dict(self):
d = self.entry.as_dict()
e = ComputedStructureEntry.from_dict(d)
self.assertAlmostEqual(e.energy, -269.38319884)
def test_str(self):
self.assertIsNotNone(str(self.entry))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/entries/tests/test_computed_entries.py
|
Python
|
mit
| 2,811
|
[
"pymatgen"
] |
1cb6deed82fa29bc5bacd35db7058dc9b11f28ff3c16538bd2076ad4a9c32868
|
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
import time
from .vk_support import get_last_visit_vk
from .engine import *
from .models import *
def process_message(message_id):
message = session.query(Message)\
.filter(Message.id==message_id).one()
message.is_processed = True
session.add(message)
session.commit()
def process_timer(timer):
user = session.query(User)\
.filter(User.id==Message.user_id,
Message.id==timer.message_id)\
.all()
if not user:
raise Exception('Timer have no user? WTF?')
user = user[0]
print('user', user)
records = session.query(SourceRecord.url, Source.name)\
.filter(SourceRecord.user_id==user.id,
Source.id==SourceRecord.source_id)\
.all()
parse_vk_id = lambda x: int(x.split('id')[-1])
visits = []
for record in records:
print('record', record)
print('vk_id', parse_vk_id(record.url))
if record.name == 'vk':
last_visit = get_last_visit_vk(parse_vk_id(record.url))
            visits.append(last_visit)
if not visits:
return
activity_marker = max(visits)
if activity_marker + relativedelta(minutes=timer.duration) > timer.next_checkdate:
timer.next_checkdate = activity_marker + relativedelta(minutes=timer.duration)
timer.last_checkdate = datetime.now()
try:
session.add(timer)
session.commit()
return
except:
session.rollback()
raise Exception('cant commit timer!')
else:
        process_message(timer.message_id)
def loop():
while 1:
current_timers = session.query(Timer)\
.filter(Timer.next_checkdate >= datetime.now() - relativedelta(minutes=20),
Timer.next_checkdate <= datetime.now())\
.all()
print('\n\ntimers', current_timers, '\n\n\n')
for t in current_timers:
process_timer(t)
time.sleep(900)
|
nixorn/iamripbackend
|
src/loop.py
|
Python
|
mit
| 2,235
|
[
"VisIt"
] |
6c678d4559e462c213c038517854a3ba73e8afd7c30013bd0fc411b3cbbe8432
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""bsuite logging and image observation wrappers."""
from typing import Any, Dict, Optional, Sequence
from bsuite import environments
from bsuite.logging import base
import dm_env
from dm_env import specs
import numpy as np
from skimage import transform
# Keys that are present for all experiments. These are computed from within
# the `Logging` wrapper.
STANDARD_KEYS = frozenset(
['steps', 'episode', 'total_return', 'episode_len', 'episode_return'])
class Logging(dm_env.Environment):
"""Environment wrapper to track and log bsuite stats."""
def __init__(self,
env: environments.Environment,
logger: base.Logger,
log_by_step: bool = False,
log_every: bool = False):
"""Initializes the logging wrapper.
Args:
env: Environment to wrap.
logger: An object that records a row of data. This must have a `write`
method that accepts a dictionary mapping from column name to value.
log_by_step: Whether to log based on step or episode count (default).
log_every: Forces logging at each step or episode, e.g. for debugging.
"""
self._env = env
self._logger = logger
self._log_by_step = log_by_step
self._log_every = log_every
# Accumulating throughout experiment.
self._steps = 0
self._episode = 0
self._total_return = 0.0
# Most-recent-episode.
self._episode_len = 0
self._episode_return = 0.0
def flush(self):
if hasattr(self._logger, 'flush'):
self._logger.flush()
def reset(self):
timestep = self._env.reset()
self._track(timestep)
return timestep
def step(self, action):
timestep = self._env.step(action)
self._track(timestep)
return timestep
def action_spec(self):
return self._env.action_spec()
def observation_spec(self):
return self._env.observation_spec()
def _track(self, timestep: dm_env.TimeStep):
# Count transitions only.
if not timestep.first():
self._steps += 1
self._episode_len += 1
if timestep.last():
self._episode += 1
self._episode_return += timestep.reward or 0.0
self._total_return += timestep.reward or 0.0
# Log statistics periodically, either by step or by episode.
if self._log_by_step:
if _logarithmic_logging(self._steps) or self._log_every:
self._log_bsuite_data()
elif timestep.last():
if _logarithmic_logging(self._episode) or self._log_every:
self._log_bsuite_data()
# Perform bookkeeping at the end of episodes.
if timestep.last():
self._episode_len = 0
self._episode_return = 0.0
if self._episode == self._env.bsuite_num_episodes:
self.flush()
def _log_bsuite_data(self):
"""Log summary data for bsuite."""
data = dict(
# Accumulated data.
steps=self._steps,
episode=self._episode,
total_return=self._total_return,
# Most-recent-episode data.
episode_len=self._episode_len,
episode_return=self._episode_return,
)
# Environment-specific metadata used for scoring.
data.update(self._env.bsuite_info())
self._logger.write(data)
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _logarithmic_logging(episode: int,
ratios: Optional[Sequence[float]] = None) -> bool:
"""Returns `True` only at specific ratios of 10**exponent."""
if ratios is None:
ratios = [1., 1.2, 1.4, 1.7, 2., 2.5, 3., 4., 5., 6., 7., 8., 9., 10.]
exponent = np.floor(np.log10(np.maximum(1, episode)))
special_vals = [10**exponent * ratio for ratio in ratios]
return any(episode == val for val in special_vals)
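# A small illustrative helper (not used by the wrappers) showing which episode
# counts the rule above selects: with the default ratios, episodes 1-10, 12, 14,
# 17, 20, 25, 30, 40, 50, ... are logged, i.e. roughly evenly spaced on a log scale.
def _demo_logarithmic_logging(limit: int = 50):
  return [episode for episode in range(1, limit + 1)
          if _logarithmic_logging(episode)]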
class ImageObservation(dm_env.Environment):
"""Environment wrapper to convert observations to an image-like format."""
def __init__(self, env: dm_env.Environment, shape: Sequence[int]):
self._env = env
self._shape = shape
def observation_spec(self):
spec = self._env.observation_spec()
return specs.Array(shape=self._shape, dtype=spec.dtype, name=spec.name)
def action_spec(self):
return self._env.action_spec()
def reset(self):
timestep = self._env.reset()
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def step(self, action):
timestep = self._env.step(action)
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _small_state_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts a small state into an image-like format."""
result = np.empty(shape=shape, dtype=observation.dtype)
size = observation.size
flattened = observation.ravel()
# Explicitly handle small observation dimensions separately
if size == 1:
result[:] = flattened[0]
elif size == 2:
result[:, :shape[1] // 2] = flattened[0]
result[:, shape[1] // 2:] = flattened[1]
elif size == 3 or size == 4:
# Top-left.
result[:shape[0] // 2, :shape[1] // 2] = flattened[0]
# Top-right.
result[shape[0] // 2:, :shape[1] // 2] = flattened[1]
# Bottom-left.
result[:shape[0] // 2, shape[1] // 2:] = flattened[2]
# Bottom-right.
result[shape[0] // 2:, shape[1] // 2:] = flattened[-1]
else:
raise ValueError('Hand-crafted rule only for small state observation.')
return result
def _interpolate_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts observation to desired shape using an interpolation."""
result = np.empty(shape=shape, dtype=observation.dtype)
if len(observation.shape) == 1:
observation = np.expand_dims(observation, 0)
# Interpolate the image and broadcast over all trailing channels.
plane_image = transform.resize(observation, shape[:2], preserve_range=True)
while plane_image.ndim < len(shape):
plane_image = np.expand_dims(plane_image, -1)
result[:, :] = plane_image
return result
def to_image(shape: Sequence[int], observation: np.ndarray) -> np.ndarray:
"""Converts a bsuite observation into an image-like format.
Example usage, converting a 3-element array into a stacked Atari-like format:
observation = to_image((84, 84, 4), np.array([1, 2, 0]))
Args:
shape: A sequence containing the desired output shape (length >= 2).
observation: A numpy array containing the observation data.
Returns:
A numpy array with shape `shape` and dtype matching the dtype of
`observation`. The entries in this array are tiled from `observation`'s
entries.
"""
assert len(shape) >= 2
if observation.size <= 4:
return _small_state_to_image(shape, observation)
elif len(observation.shape) <= 2:
return _interpolate_to_image(shape, observation)
else:
raise ValueError(
'Cannot convert observation shape {} to desired shape {}'.format(
observation.shape, shape))
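# A small illustrative example (not used by the wrappers) of the hand-crafted
# small-state rule above, with a 4x4 target so the quadrant tiling is visible.
def _demo_to_image_quadrants():
  image = to_image((4, 4), np.array([1, 2, 0]))
  # image == [[1, 1, 0, 0],
  #           [1, 1, 0, 0],
  #           [2, 2, 0, 0],
  #           [2, 2, 0, 0]]
  return image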
class RewardNoise(environments.Environment):
"""Reward Noise environment wrapper."""
def __init__(self,
env: environments.Environment,
noise_scale: float,
seed: Optional[int] = None):
"""Builds the Reward Noise environment wrapper.
Args:
env: An environment whose rewards to perturb.
      noise_scale: Standard deviation of Gaussian noise on rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardNoise, self).__init__()
self._env = env
self._noise_scale = noise_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._add_reward_noise(self._env.step(action))
def _add_reward_noise(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward + self._noise_scale * self._rng.randn()
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
class RewardScale(environments.Environment):
"""Reward Scale environment wrapper."""
def __init__(self,
env: environments.Environment,
reward_scale: float,
seed: Optional[int] = None):
"""Builds the Reward Scale environment wrapper.
Args:
env: Environment whose rewards to rescale.
reward_scale: Rescaling for rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardScale, self).__init__()
self._env = env
self._reward_scale = reward_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._rescale_rewards(self._env.step(action))
def _rescale_rewards(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward * self._reward_scale
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
|
deepmind/bsuite
|
bsuite/utils/wrappers.py
|
Python
|
apache-2.0
| 11,819
|
[
"Gaussian"
] |
6f94b4a2fbc2aa2e4948322a5256c94cf43a348086361f91ebe8840f7f0a4b73
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# altair documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 7 12:52:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
'numpydoc.numpydoc',
'altair.sphinxext.altairplot',
'altair.sphinxext.altairgallery',
'altair.sphinxext.schematable'
]
altair_plot_links = {'editor': True, 'source': False, 'export': False}
autodoc_default_flags = ['members', 'inherited-members']
autodoc_member_order = 'groupwise'
# generate autosummary even if no references
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Altair'
copyright = '2016-2018, Altair Developers'
author = 'Brian Granger and Jake VanderPlas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2.0dev0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'colorful'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'altair v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Altair'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/altair-logo-light.png'
# The name of an image file (relative to this directory) to use as the favicon
# of the docs. This file should be a Windows icon file (.ico), 16x16 or 32x32
# pixels in size.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images']
# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
# and
# https://github.com/rtfd/sphinx_rtd_theme/issues/117
def setup(app):
app.add_stylesheet('theme_overrides.css')
app.add_stylesheet('custom.css')
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'altairdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'altair.tex', 'altair Documentation',
'Brian Granger and Jake VanderPlas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'altair', 'altair Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'altair', 'altair Documentation',
author, 'altair', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Hide extra class members
numpydoc_show_class_members = False
# For the altairplot extension
altairplot_links = {'editor': True, 'source': True, 'export': True}
altairplot_vega_js_url = "https://cdn.jsdelivr.net/npm/vega@5.4"
altairplot_vegalite_js_url = "https://cdn.jsdelivr.net/npm/vega-lite@3.3"
altairplot_vegaembed_js_url = "https://cdn.jsdelivr.net/npm/vega-embed@4.2"
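# A hypothetical reST usage sketch for the altairplot extension configured
# above; the 'altair-plot' directive name is assumed from the extension
# module and is not defined in this file:
#
#   .. altair-plot::
#
#       import altair as alt
#       alt.Chart(my_data).mark_point().encode(x='x:Q', y='y:Q')
#
# (my_data stands in for any pandas DataFrame.)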
|
jakevdp/altair
|
doc/conf.py
|
Python
|
bsd-3-clause
| 10,616
|
[
"Brian"
] |
1c549ae0e6a1be74161c695f91e9071709bddd1992b5893c6c7d35a083c380cc
|
"""
A library of spatial network kernel density functions.
Not to be used without permission.
Contact:
Andrew Winslow
GeoDa Center for Geospatial Analysis
Arizona State University
Tempe, AZ
Andrew.Winslow@asu.edu
"""
import operator
import unittest
import test
import priordict as priordict
import network as pynet
from math import exp, sqrt, pi
import time
def triangular(z):
return 1 - abs(z)
def uniform(z):
return abs(z)
def quadratic(z):
return 0.75*(1 - z*z)
def quartic(z):
return (3.0/pi)*(1-z*z)*(1-z*z)
#return (15*1.0/16)*(1-z*z)*(1-z*z)
def gaussian(z):
return sqrt(2*pi)*exp(-0.5*z*z)
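# Note: kernel_density() below evaluates each of these kernels at
# z = d / bandwidth with 0 <= z <= 1. The uniform and gaussian forms above
# do not match the usual textbook definitions (a constant 0.5 and
# (1/sqrt(2*pi))*exp(-z*z/2) respectively), so absolute density values
# depend on the kernel chosen.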
def dijkstras_w_prev(G, start, r=1e600):
D = {} # dictionary of final distances
P = {} # dictionary of previous nodes
Q = priordict.PriorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
P[start] = None
for v in Q:
D[v] = Q[v]
if v == None or D[v] > r:
break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
pass
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return (D, P)
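# A minimal sketch (not part of the original module) of the input format
# dijkstras_w_prev() expects: a dict of dicts mapping node -> {neighbour:
# edge length}. The node labels 'a', 'b' and 'c' are hypothetical.
#
# toy_net = {'a': {'b': 1.0, 'c': 4.0},
#            'b': {'a': 1.0, 'c': 2.0},
#            'c': {'a': 4.0, 'b': 2.0}}
# D, P = dijkstras_w_prev(toy_net, 'a')
# # D == {'a': 0, 'b': 1.0, 'c': 3.0}; P == {'a': None, 'b': 'a', 'c': 'b'}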
def kernel_density(network, events, bandwidth, orig_nodes, kernel='quadratic'):
"""
This function estimates Kernel densities on a planar undirected network.
It implements the equal-split discontinuous Kernel function developed by Okabe et al. (2009).
In particular, it computes Kernel densities by using equations 19 and 20
in the paper of Okabe et al. (2009).
Parameters
----------
network: A dictionary of dictionaries like {n1:{n2:d12,...},...}
A planar undirected network
It is assumed that this network is divided by a certain cell size
and is restructured to incorporate the new nodes resulting from the division as well as
events. Therefore, nodes in the network can be classified into three groups:
1) original nodes, 2) event points, and 3) cell points.
events: a list of tuples
a tuple is the network-projected coordinate of an event
that takes the form of (x,y)
bandwidth: a float
Kernel bandwidth
orig_nodes: a list of tuples
a tuple is the coordinate of a node that is part of the original base network
each tuple takes the form of (x,y)
kernel: string
the type of Kernel function
allowed values: 'quadratic', 'gaussian', 'quartic', 'uniform', 'triangular'
Returns
-------
A dictionary where keys are nodes and values are their densities
Example: {n1:d1,n2:d2,...}
<tc>#is#kernel_density</tc>
"""
# beginning of step i
density = {}
for n in network:
density[n] = []
# end of step i
# beginning of step ii
def compute_split_multiplier(prev_D, n):
'''
computes the denominator of formula 19
Parameters
----------
prev_D: a dictionary storing paths from n to other nodes in the network
its form is like: {n1:prev_node_of_n1(=n2), n2:prev_node_of_n2(=n3),...}
n: a tuple containing the geographic coordinate of a starting point
its form is like: (x,y)
Returns
-------
An integer
'''
split_multiplier = 1
p = prev_D[n]
while p != None:
if len(network[p]) > 1:
split_multiplier *= (len(network[p]) - 1)
if p not in prev_D:
p = None
else:
p = prev_D[p]
return split_multiplier
# end of step ii
kernel_funcs = {'triangular':triangular, 'uniform': uniform,
'quadratic': quadratic, 'quartic':quartic, 'gaussian':gaussian}
#t1 = time.time()
# beginning of step iii
kernel_func = kernel_funcs[kernel]
for e in events:
# beginning of step a
src_D = pynet.dijkstras(network, e, bandwidth, True)
# end of step a
# beginning of step b
density[e].append(kernel_func(0))
# end of step b
# beginning of step c
for n in src_D[0]: # src_D[0] - a dictionary of nodes whose network distance from e is within the bandwidth
if src_D[0][n] == 0: continue
# src_D[1] - a dictionary from which a path from e to n can be traced
d = src_D[0][n]
if d <= bandwidth:
n_degree = 2.0
if n in events and n in orig_nodes and len(network[n]) > 0:
n_degree = len(network[n])
unsplit_density = kernel_func(d*1.0/bandwidth*1.0)
# src_D[1] - a dictionary from which a path from e to n can be traced
split_multiplier = compute_split_multiplier(src_D[1], n)
density[n].append((1.0/split_multiplier)*(2.0/n_degree)*unsplit_density)
#if str(n[0]) == '724900.335127' and str(n[1]) == '872127.948935':
# print 'event', e
# print 'distance', d
# print 'unsplit_density', unsplit_density
# print 'n_degree', n_degree
# print 'split_multiplier', split_multiplier
# print 'density', (1.0/split_multiplier)*(2.0/n_degree)*unsplit_density
# end of step c
# beginning of step iv
#t1 = time.time()
no_events = len(events)
for node in density:
if len(density[node]) > 0:
#if str(node[0]) == '724900.335127' and str(node[1]) == '872127.948935':
# print density[node]
density[node] = sum(density[node])/no_events
#density[node] = sum(density[node])*1.0/len(density[node])
else:
density[node] = 0.0
# end of step iv
#for node in events:
# del density[node]
#print 'normalizing density: %s' % (str(time.time() - t1))
return density
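# A minimal usage sketch (hypothetical coordinates, not from the original
# test suite). Every event must also appear as a node of the network dict,
# and orig_nodes lists the nodes of the base network:
#
# net = {(0.0, 0.0): {(1.0, 0.0): 1.0},
#        (1.0, 0.0): {(0.0, 0.0): 1.0, (2.0, 0.0): 1.0},
#        (2.0, 0.0): {(1.0, 0.0): 1.0}}
# densities = kernel_density(net, [(1.0, 0.0)], 1.5,
#                            [(0.0, 0.0), (2.0, 0.0)], kernel='quadratic')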
|
darribas/pysal
|
pysal/contrib/network/kernel.py
|
Python
|
bsd-3-clause
| 6,004
|
[
"Gaussian"
] |
0c565e8e83fd081222d6cec2e32b63ae91c850ada9d0d2e4bfbe77e9ebf38ba3
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Stamp_Uploaded_Files: A WebSubmit Function whose job is to stamp given
files that were uploaded during a submission.
"""
__revision__ = "$Id$"
from invenio.errorlib import register_exception
from invenio import websubmit_file_stamper
from invenio.websubmit_config import InvenioWebSubmitFunctionWarning, \
InvenioWebSubmitFunctionError, InvenioWebSubmitFileStamperError
import os.path, shutil, re
def Stamp_Uploaded_Files(parameters, curdir, form, user_info=None):
"""
Stamp certain files that have been uploaded during a submission.
@param parameters: (dictionary) - must contain:
+ latex_template: (string) - the name of the LaTeX template that
should be used for the creation of the stamp.
+ latex_template_vars: (string) - a string-ified dictionary
of variables to be replaced in the LaTeX template and the
values (or names of files in curdir containing the values)
with which to replace them. Use prefix 'FILE:' to specify
that the stamped value must be read from a file in
submission directory instead of being a fixed value to
stamp.
E.G.:
{ 'TITLE' : 'FILE:DEMOTHESIS_TITLE',
'DATE' : 'FILE:DEMOTHESIS_DATE'
}
+ files_to_be_stamped: (string) - The directories in which files
should be stamped: This is a comma-separated list of directory
names. E.g.:
DEMOTHESIS_MAIN,DEMOTHESIS_ADDITIONAL
(If you use Create_Upload_Files_Interface function, you
should know that uploaded files go under a subdirectory
'updated/' of the /files/ folder in submission directory:
in this case you have to specify this component in the
parameter, e.g.:
updated/DEMOTHESIS_MAIN,updated/DEMOTHESIS_ADDITIONAL)
+ stamp: (string) - the type of stamp to be applied to the files.
should be one of:
+ first (only the first page is stamped);
+ all (all pages are stamped);
+ coverpage (a separate cover-page is added to the file as a
first page);
+ layer: (string) - the position of the stamp. Should be one of:
+ background (invisible if original file has a white -
not transparent- background layer)
+ foreground (on top of the stamped file. If the stamp
does not have a transparent background,
will hide all of the document layers)
+ switch_file: (string) - when this value is set, specifies
the name of a file that will switch on/off the
stamping. The 'switch_file' must contain the names defined
in 'files_to_be_stamped' (comma-separated values). Stamp
will be applied only to files referenced in the
switch_file. No stamp is applied if the switch_file is
missing from the submission directory.
However, if no switch_file is defined in this variable
(parameter is left empty), stamps are applied according to
the variable 'files_to_be_stamped'.
Useful, e.g., if you want to let your users control the
stamping with a checkbox on your submission page.
If all goes according to plan, for each directory in which files are to
be stamped, the original, unstamped files should be found in a
directory 'files_before_stamping/DIRNAME', and the stamped versions
should be found under 'files/DIRNAME'. E.g., for DEMOTHESIS_Main:
- Unstamped: files_before_stamping/DEMOTHESIS_Main
- Stamped: files/DEMOTHESIS_Main
"""
## The file stamper needs to be called with a dictionary of options of
## the following format:
## { 'latex-template' : "", ## TEMPLATE_NAME
## 'latex-template-var' : {}, ## TEMPLATE VARIABLES
## 'input-file' : "", ## INPUT FILE
## 'output-file' : "", ## OUTPUT FILE
## 'stamp' : "", ## STAMP TYPE
## 'layer' : "", ## LAYER TO STAMP
## 'verbosity' : 0, ## VERBOSITY (we don't care about it)
## }
file_stamper_options = { 'latex-template' : "",
'latex-template-var' : { },
'input-file' : "",
'output-file' : "",
'stamp' : "",
'layer' : "",
'verbosity' : 0,
}
## A dictionary of arguments to be passed to visit_for_stamping:
visit_for_stamping_arguments = { 'curdir' : curdir,
'file_stamper_options' : \
file_stamper_options,
'user_info' : user_info
}
## Start by getting the parameter-values from WebSubmit:
## The name of the LaTeX template to be used for stamp creation:
latex_template = "%s" % ((type(parameters['latex_template']) is str \
and parameters['latex_template']) or "")
## A string containing the variables/values that should be substituted
## in the final (working) LaTeX template:
latex_template_vars_string = "%s" % \
((type(parameters['latex_template_vars']) is str \
and parameters['latex_template_vars']) or "")
## The type of stamp to be applied to the file(s):
stamp = "%s" % ((type(parameters['stamp']) is str and \
parameters['stamp'].lower()) or "")
## The layer to use for stamping:
try:
layer = parameters['layer']
except KeyError:
layer = "background"
if not layer in ('background', 'foreground'):
layer = "background"
## The directories in which files should be stamped:
## This is a comma-separated list of directory names. E.g.:
## DEMOTHESIS_MAIN,DEMOTHESIS_ADDITIONAL
stamp_content_of = "%s" % ((type(parameters['files_to_be_stamped']) \
is str and parameters['files_to_be_stamped']) \
or "")
## Now split the list (of directories in which to stamp files) on commas:
if stamp_content_of.strip() != "":
stamping_locations = stamp_content_of.split(",")
else:
stamping_locations = []
## Check if stamping is enabled
switch_file = parameters.get('switch_file', '')
if switch_file:
# Good, a "switch file" was specified. Check if it exists, and
# if its value is not empty.
switch_file_content = ''
try:
fd = file(os.path.join(curdir, switch_file))
switch_file_content = fd.read().split(',')
fd.close()
except:
switch_file_content = ''
if not switch_file_content:
# File does not exist, or is empty. Silently abort
# stamping.
return ""
else:
stamping_locations = [location for location in stamping_locations \
if location in switch_file_content]
if len(stamping_locations) == 0:
## If there are no items to be stamped, don't continue:
return ""
## Strip the LaTeX filename into the basename (All templates should be
## in the template repository):
latex_template = os.path.basename(latex_template)
## Convert the string of latex template variables into a dictionary
## of search-term/replacement-term pairs:
latex_template_vars = get_dictionary_from_string(latex_template_vars_string)
## For each of the latex variables, check in `CURDIR' for a file with that
## name. If found, use its contents as the template-variable's value.
## If not, just use the raw value string already held by the template
## variable:
latex_template_varnames = latex_template_vars.keys()
for varname in latex_template_varnames:
## Get this variable's value:
varvalue = latex_template_vars[varname].strip()
if not ((varvalue.find("date(") == 0 and varvalue[-1] == ")") or \
(varvalue.find("include(") == 0 and varvalue[-1] == ")")) \
and varvalue != "":
## We don't want to interfere with date() or include() directives,
## so we only do this if the variable value didn't contain them:
##
## Is this variable value the name of a file in the current
## submission's working directory, from which a literal value for
## use in the template should be extracted? If yes, it will
## begin with "FILE:". If no, we leave the value exactly as it is.
if varvalue.upper().find("FILE:") == 0:
## The value to be used is to be taken from a file. Clean the
## file name and if it's OK, extract that value from the file.
##
seekvalue_fname = varvalue[5:].strip()
seekvalue_fname = os.path.basename(seekvalue_fname).strip()
if seekvalue_fname != "":
## Attempt to extract the value from the file:
if os.access("%s/%s" % (curdir, seekvalue_fname), \
os.R_OK|os.F_OK):
## The file exists. Extract its value:
try:
repl_file_val = \
open("%s/%s" \
% (curdir, seekvalue_fname), "r").readlines()
except IOError:
## The file was unreadable.
err_msg = "Error in Stamp_Uploaded_Files: The " \
"function attempted to read a LaTex " \
"template variable value from the " \
"following file in the current " \
"submission's working directory: " \
"[%s]. However, an unexpected error " \
"was encountered when doing so. " \
"Please inform the administrator." \
% seekvalue_fname
register_exception(req=user_info['req'])
raise InvenioWebSubmitFunctionError(err_msg)
else:
final_varval = ""
for line in repl_file_val:
final_varval += line
final_varval = final_varval.rstrip()
## Replace the variable value with that which has
## been read from the file:
latex_template_vars[varname] = final_varval
else:
## The file didn't actually exist in the current
## submission's working directory. Use an empty
## value:
latex_template_vars[varname] = ""
else:
## The filename was not valid.
err_msg = "Error in Stamp_Uploaded_Files: The function " \
"was configured to read a LaTeX template " \
"variable from a file with the following " \
"instruction: [%s --> %s]. The filename, " \
"however, was not considered valid. Please " \
"report this to the administrator." \
% (varname, varvalue)
raise InvenioWebSubmitFunctionError(err_msg)
## Put the 'fixed' values into the file_stamper_options dictionary:
file_stamper_options['latex-template'] = latex_template
file_stamper_options['latex-template-var'] = latex_template_vars
file_stamper_options['stamp'] = stamp
file_stamper_options['layer'] = layer
for stampdir in stamping_locations:
## Create the full path to the stamp directory - it is considered
## to be under 'curdir' - the working directory for the current
## submission:
path_to_stampdir = "%s/files/%s" % (curdir, stampdir.strip())
## Call os.path.walk, passing it the path to the directory to be
## walked, the visit_for_stamping function (which will call the
## file-stamper for each file within that directory), and the
## dictionary of options to be passed to the file-stamper:
try:
os.path.walk(path_to_stampdir, \
visit_for_stamping, \
visit_for_stamping_arguments)
except InvenioWebSubmitFunctionWarning:
## Unable to stamp the files in stampdir. Register the exception
## and continue to try to stamp the files in the other stampdirs:
## FIXME - The original exception was registered in 'visit'.
## Perhaps we should just send the message contained in this
## warning to the admin?
register_exception(req=user_info['req'])
continue
except InvenioWebSubmitFunctionError, err:
## Unexpected error in stamping. The admin should be contacted
## because it has resulted in an unstable situation with the
## files. They are no longer in a well-defined state - some may
## have been lost and manual intervention by the admin is needed.
## FIXME - should this be reported here, or since we propagate it
## up to websubmit_engine anyway, should we let it register it?
register_exception(req=user_info['req'])
raise err
return ""
def visit_for_stamping(visit_for_stamping_arguments, dirname, filenames):
"""Visitor function called by os.path.walk.
This function takes a directory and a list of files in that directory
and for each file, calls the websubmit_file_stamper on it.
When a file is stamped, the original is moved away into a directory
of unstamped files and the new, stamped version is moved into its
place.
@param visit_for_stamping_arguments: (dictionary) of arguments needed
by this function. Must contain 'curdir', 'user_info' and
'file_stamper_options' members.
@param dirname: (string) - the path to the directory in which the
files are to be stamped.
@param filenames: (list) - the names of each file in dirname. An
attempt will be made to stamp each of these files.
@Exceptions Raised:
+ InvenioWebSubmitFunctionWarning;
+ InvenioWebSubmitFunctionError;
"""
## Get the dictionary of options to pass to the stamper:
file_stamper_options = visit_for_stamping_arguments['file_stamper_options']
## Create a directory to store original files before stamping:
dirname_files_pre_stamping = dirname.replace("/files/", \
"/files_before_stamping/", 1)
if not os.path.exists(dirname_files_pre_stamping):
try:
os.makedirs(dirname_files_pre_stamping)
except OSError, err:
## Unable to make a directory in which to store the unstamped
## files.
## Register the exception:
exception_prefix = "Unable to stamp files in [%s]. Couldn't " \
"create directory in which to store the " \
"original, unstamped files." \
% dirname
register_exception(prefix=exception_prefix)
## Since we can't make a directory for the unstamped files,
## we can't continue to stamp them.
## Skip the stamping of the contents of this directory by raising
## a warning:
msg = "Warning: A problem occurred when stamping files in [%s]. " \
"Unable to create directory to store the original, " \
"unstamped files. Got this error: [%s]. This means the " \
"files in this directory were not stamped." \
% (dirname, str(err))
raise InvenioWebSubmitFunctionWarning(msg)
## Loop through each file in the directory and attempt to stamp it:
for file_to_stamp in filenames:
## Get the path to the file to be stamped and put it into the
## dictionary of options that will be passed to stamp_file:
path_to_subject_file = "%s/%s" % (dirname, file_to_stamp)
file_stamper_options['input-file'] = path_to_subject_file
## Just before attempting to stamp the file, log the dictionary of
## options (file_stamper_options) that will be passed to websubmit-
## file-stamper:
try:
fh_log = open("%s/websubmit_file_stamper-calls-options.log" \
% visit_for_stamping_arguments['curdir'], "a+")
fh_log.write("%s\n" % file_stamper_options)
fh_log.flush()
fh_log.close()
except IOError:
## Unable to log the file stamper options.
exception_prefix = "Unable to write websubmit_file_stamper " \
"options to log file " \
"%s/websubmit_file_stamper-calls-options.log" \
% visit_for_stamping_arguments['curdir']
register_exception(prefix=exception_prefix)
try:
## Try to stamp the file:
(stamped_file_path_only, stamped_file_name) = \
websubmit_file_stamper.stamp_file(file_stamper_options)
except InvenioWebSubmitFileStamperError:
## It wasn't possible to stamp this file.
## Register the exception along with an informational message:
exception_prefix = "A problem occurred when stamping [%s]. The " \
"stamping of this file was unsuccessful." \
% path_to_subject_file
register_exception(prefix=exception_prefix)
## Skip this file, moving on to the next:
continue
else:
## Stamping was successful.
path_to_stamped_file = "%s/%s" % (stamped_file_path_only, \
stamped_file_name)
## Move the unstamped file from the "files" directory into the
## "files_before_stamping" directory:
try:
shutil.move(path_to_subject_file, "%s/%s" \
% (dirname_files_pre_stamping, file_to_stamp))
except IOError:
## Couldn't move the original file away from the "files"
## directory. Log the problem and continue on to the next
## file:
exception_prefix = "A problem occurred when stamping [%s]. " \
"The file was sucessfully stamped, and " \
"can be found here: [%s]. Unfortunately " \
"though, it could not be copied back to " \
"the current submission's working " \
"directory because the unstamped version " \
"could not be moved out of the way (tried " \
"to move it from here [%s] to here: " \
"[%s/%s]). The stamping of this file was " \
"unsuccessful." \
% (path_to_subject_file, \
path_to_stamped_file, \
path_to_subject_file, \
dirname_files_pre_stamping, \
file_to_stamp)
register_exception(prefix=exception_prefix)
continue
else:
## The original file has been moved into the files before
## stamping directory. Now try to copy the stamped file into
## the files directory:
try:
shutil.copy(path_to_stamped_file, "%s/%s" \
% (dirname, file_to_stamp))
except IOError:
## Even though the original, unstamped file was moved away
## from the files directory, the stamped-version couldn't
## be moved into its place. Register the exception:
exception_prefix = "A problem occurred when stamping " \
"[%s]. The file was sucessfully " \
"stamped, and can be found here: " \
"[%s]. Unfortunately though, it " \
"could not be copied back to the " \
"current submission's working " \
"directory." % (path_to_subject_file, \
path_to_stamped_file)
register_exception(prefix=exception_prefix)
## Because it wasn't possible to move the stamped file
## into the files directory, attempt to move the original,
## unstamped file back into the files directory:
try:
shutil.move("%s/%s" % (dirname_files_pre_stamping, \
file_to_stamp), \
path_to_stamped_file)
except IOError, err:
## It wasn't possible even to move the original file
## back to the files directory. Register the
## exception and stop the stamping process - it isn't
## safe to continue:
exception_prefix = "A problem occurred when stamping " \
"[%s]. The file was sucessfully " \
"stamped, and can be found here: " \
"[%s]. Unfortunately though, it " \
"could not be copied back to the " \
"current submission's working " \
"directory. Additionionally, the " \
"original, unstamped file " \
"could not be moved back to the " \
"files directory, from the files-" \
"before-stamping directory. It " \
"can now be found here: [%s/%s]. " \
"Stamping cannot continue and " \
"manual intervention is necessary " \
"because the file cannot be " \
"attached to the record." \
% (path_to_subject_file, \
path_to_stamped_file, \
dirname_files_pre_stamping, \
file_to_stamp)
register_exception(prefix=exception_prefix)
## Raise an InvenioWebSubmitFunctionError, stopping
## further stamping, etc:
raise InvenioWebSubmitFunctionError(exception_prefix)
def get_dictionary_from_string(dict_string):
"""Given a string version of a "dictionary", split the string into a
python dictionary.
For example, given the following string:
{'TITLE' : 'EX_TITLE', 'AUTHOR' : 'EX_AUTHOR', 'REPORTNUMBER' : 'EX_RN'}
A dictionary in the following format will be returned:
{
'TITLE' : 'EX_TITLE',
'AUTHOR' : 'EX_AUTHOR',
'REPORTNUMBER' : 'EX_RN',
}
@param dict_string: (string) - the string version of the dictionary.
@return: (dictionary) - the dictionary built from the string.
"""
## First, strip off the leading and trailing spaces and braces:
dict_string = dict_string.strip(" {}")
## Next, split the string on commas (,) that have not been escaped
## So, the following string: """'hello' : 'world', 'click' : 'here'"""
## will be split into the following list:
## ["'hello' : 'world'", " 'click' : 'here'"]
##
## However, the string """'hello\, world' : '!', 'click' : 'here'"""
## will be split into: ["'hello\, world' : '!'", " 'click' : 'here'"]
## I.e. the comma that was escaped in the string has been kept.
##
## So basically, split on unescaped commas first:
key_vals = re.split(r'(?<!\\),', dict_string)
## Now we should have a list of "key" : "value" terms. For each of them,
## check it is OK. If not in the format "Key" : "Value" (quotes are
## optional), discard it. As with the comma separator in the previous
## splitting, this one splits on the first colon (:) ONLY.
final_dictionary = {}
for key_value_string in key_vals:
## Split the pair apart, based on the first ":":
key_value_pair = key_value_string.split(":", 1)
## check that the length of the new list is 2:
if len(key_value_pair) != 2:
## There was a problem with the splitting - pass this pair
continue
## The split was made.
## strip white-space, single-quotes and double-quotes from around the
## key and value pairs:
key_term = key_value_pair[0].strip(" '\"")
value_term = key_value_pair[1].strip(" '\"")
## Is the left-side (key) term empty?
if len(key_term) == 0:
continue
## Now, add the search-replace pair to the dictionary of
## search-replace terms:
final_dictionary[key_term] = value_term
return final_dictionary
|
l0b0/cds-invenio-vengmark
|
modules/websubmit/lib/functions/Stamp_Uploaded_Files.py
|
Python
|
gpl-2.0
| 27,593
|
[
"VisIt"
] |
d887f39401c1837a37ed7b4dbf10b1d7ef5838528a1cae44c77d690e61d4689f
|
#
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
# Scissor correction to bands structure in
# Quantum Espresso format.
#
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def shiftband(self, shift):
# Apply a rigid (scissor) shift to every energy in this band.
self.nrg = [e + shift for e in self.nrg]
return
class kpoints:
def __init__(self):
self.klist = []
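# An illustrative sketch of the bands.dat layout the parser below assumes
# (numbers and spacing are made up; bands.x writes a header line holding
# nbnd and nks, then, for every k-point, one coordinate line followed by
# the band energies spread over several lines):
#
#  &plot nbnd=   8, nks=   2 /
#            0.000000  0.000000  0.000000
#    -5.812  6.255  6.255  6.255  8.821  8.821  8.821  9.654
#            0.500000  0.000000  0.000000
#    ...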
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written to the output
# of the bands.x program. It returns nks and nbnd, the list of
# k-point coordinates, and the band structure as a flat list with
# all energies.
#
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
print nbnd, nks
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) lines of energy values.
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point
nlines = nks + nks * nlpkp
bsaux = []
xkpt = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xkpt.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
auxenerg.append( float(x) for x in currentline.split() )
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# append to band structure
bsaux.append(energ)
f.close()
# Flatten whole band structure and rearrange as a list of band objects
bsflt = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xkpt, bsflt
def ArrangeByBands(numkpts, numbands, bsflat):
# Rearrange the flat band structure into a list of band objects,
# one object per band, each holding its energy at every k-point.
bs = []
for ibnd in range (0, numbands):
currentband = []
for ik in range (0, numkpts):
currentband.append(bsflat[ik*numbands+ibnd])
bs.append( band(numkpts, currentband) )
for ibnd in range (0, numbands):
print bs[ibnd].nrg
return bs
def GetBands(bandsfile):
nks, nbnd, xk, bsflat = ReadBandStructure(bandsfile)
print nks, nbnd
print bsflat
bs = ArrangeByBands(nks, nbnd, bsflat)
return xk, bs
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
#######################
filename="bands.dat"
xk=[]
bs=[]
GetBands(filename, xk, bs)
#filecontent = f.read()
#print filecontent[1]
#testband = band()
#testband.readband(filecontent)
|
adrian-soto/QEdark_repo
|
tools/bandsndos/scissor_old.py
|
Python
|
gpl-2.0
| 3,508
|
[
"Quantum ESPRESSO"
] |
637a521323fd4e126623e3c854d2b3fd2b8529444e8fc24a1cf92000c265ec8f
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
import datetime
from nose.plugins.attrib import attr
from ..helpers import UniqueCourseTest
from ...fixtures.course import CourseFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.dashboard import DashboardPage
DEFAULT_SHORT_DATE_FORMAT = "%b %d, %Y"
DEFAULT_DAY_AND_TIME_FORMAT = "%A at %-I%P"
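# For reference, DEFAULT_SHORT_DATE_FORMAT renders dates like "Sep 23, 2015"
# and DEFAULT_DAY_AND_TIME_FORMAT like "Wednesday at 5am"; the %-I and %P
# specifiers are glibc extensions and rely on the Linux hosts these
# acceptance tests run on.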
class BaseLmsDashboardTest(UniqueCourseTest):
""" Base test suite for the LMS Student Dashboard """
def setUp(self):
"""
Initializes the components (page objects, courses, users) for this test suite
"""
# Some parameters are provided by the parent setUp() routine, such as the following:
# self.course_id, self.course_info, self.unique_id
super(BaseLmsDashboardTest, self).setUp()
# Load page objects for use by the tests
self.dashboard_page = DashboardPage(self.browser)
# Configure some aspects of the test course and install the settings into the course
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
)
self.course_fixture.add_advanced_settings({
u"social_sharing_url": {u"value": "http://custom/course/url"}
})
self.course_fixture.install()
# Create the test user, register them for the course, and authenticate
self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
self.email = "{user}@example.com".format(user=self.username)
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
course_id=self.course_id
).visit()
# Navigate the authenticated, enrolled user to the dashboard page and get testing!
self.dashboard_page.visit()
class LmsDashboardPageTest(BaseLmsDashboardTest):
""" Test suite for the LMS Student Dashboard page """
def setUp(self):
super(LmsDashboardPageTest, self).setUp()
# now datetime for usage in tests
self.now = datetime.datetime.now()
def test_dashboard_course_listings(self):
"""
Perform a general validation of the course listings section
"""
course_listings = self.dashboard_page.get_course_listings()
self.assertEqual(len(course_listings), 1)
def test_dashboard_social_sharing_feature(self):
"""
Validate the behavior of the social sharing feature
"""
twitter_widget = self.dashboard_page.get_course_social_sharing_widget('twitter')
twitter_url = "https://twitter.com/intent/tweet?text=Testing+feature%3A%20http%3A%2F%2Fcustom%2Fcourse%2Furl"
self.assertEqual(twitter_widget.attrs('title')[0], 'Share on Twitter')
self.assertEqual(twitter_widget.attrs('data-tooltip')[0], 'Share on Twitter')
self.assertEqual(twitter_widget.attrs('aria-haspopup')[0], 'true')
self.assertEqual(twitter_widget.attrs('aria-expanded')[0], 'false')
self.assertEqual(twitter_widget.attrs('target')[0], '_blank')
self.assertIn(twitter_url, twitter_widget.attrs('href')[0])
self.assertIn(twitter_url, twitter_widget.attrs('onclick')[0])
facebook_widget = self.dashboard_page.get_course_social_sharing_widget('facebook')
facebook_url = "https://www.facebook.com/sharer/sharer.php?u=http%3A%2F%2Fcustom%2Fcourse%2Furl"
self.assertEqual(facebook_widget.attrs('title')[0], 'Share on Facebook')
self.assertEqual(facebook_widget.attrs('data-tooltip')[0], 'Share on Facebook')
self.assertEqual(facebook_widget.attrs('aria-haspopup')[0], 'true')
self.assertEqual(facebook_widget.attrs('aria-expanded')[0], 'false')
self.assertEqual(facebook_widget.attrs('target')[0], '_blank')
self.assertIn(facebook_url, facebook_widget.attrs('href')[0])
self.assertIn(facebook_url, facebook_widget.attrs('onclick')[0])
def test_ended_course_date(self):
"""
Scenario:
Course Date should have the format 'Ended - Sep 23, 2015'
if the course on student dashboard has ended.
As a Student,
Given that I have enrolled in a course
And the course has ended in the past
When I visit dashboard page
Then the course date should have the following format "Ended - %b %d, %Y" e.g. "Ended - Sep 23, 2015"
"""
course_start_date = datetime.datetime(1970, 1, 1)
course_end_date = self.now - datetime.timedelta(days=90)
self.course_fixture.add_course_details({'start_date': course_start_date,
'end_date': course_end_date})
self.course_fixture.configure_course()
end_date = course_end_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Ended - {end_date}".format(end_date=end_date)
# reload the page for course date changes to appear in the dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'ended' message is displayed if a course has already ended
self.assertEqual(course_date, expected_course_date)
def test_running_course_date(self):
"""
Scenario:
Course Date should have the format 'Started - Sep 23, 2015'
if the course on student dashboard is running.
As a Student,
Given that I have enrolled in a course
And the course has started
And the course is in progress
When I visit dashboard page
Then the course date should have the following format "Started - %b %d, %Y" e.g. "Started - Sep 23, 2015"
"""
course_start_date = datetime.datetime(1970, 1, 1)
course_end_date = self.now + datetime.timedelta(days=90)
self.course_fixture.add_course_details({'start_date': course_start_date,
'end_date': course_end_date})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Started - {start_date}".format(start_date=start_date)
# reload the page for course date changes to appear in the dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'started' message is displayed if a course is in running state
self.assertEqual(course_date, expected_course_date)
def test_future_course_date(self):
"""
Scenario:
Course Date should have the format 'Starts - Sep 23, 2015'
if the course on student dashboard starts in future.
As a Student,
Given that I have enrolled in a course
And the course starts in future
And the course does not start within 5 days
When I visit dashboard page
Then the course date should have the following format "Starts - %b %d, %Y" e.g. "Starts - Sep 23, 2015"
"""
course_start_date = self.now + datetime.timedelta(days=30)
course_end_date = self.now + datetime.timedelta(days=365)
self.course_fixture.add_course_details({'start_date': course_start_date,
'end_date': course_end_date})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Starts - {start_date}".format(start_date=start_date)
# reload the page for course date changes to appear in the dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'starts' message is displayed if a course is about to start in future,
# and course does not start within 5 days
self.assertEqual(course_date, expected_course_date)
def test_near_future_course_date(self):
"""
Scenario:
Course Date should have the format 'Starts - Wednesday at 5am UTC'
if the course on student dashboard starts within 5 days.
As a Student,
Given that I have enrolled in a course
And the course starts within 5 days
When I visit dashboard page
Then the course date should have the following format "Starts - %A at %-I%P UTC"
e.g. "Starts - Wednesday at 5am UTC"
"""
course_start_date = self.now + datetime.timedelta(days=2)
course_end_date = self.now + datetime.timedelta(days=365)
self.course_fixture.add_course_details({'start_date': course_start_date,
'end_date': course_end_date})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_DAY_AND_TIME_FORMAT)
expected_course_date = "Starts - {start_date} UTC".format(start_date=start_date)
# reload the page for course date changes to appear in the dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'starts' message is displayed if a course is about to start in future,
# and course starts within 5 days
self.assertEqual(course_date, expected_course_date)
@attr('a11y')
class LmsDashboardA11yTest(BaseLmsDashboardTest):
"""
Class to test lms student dashboard accessibility.
"""
def test_dashboard_course_listings_a11y(self):
"""
Test the accessibility of the course listings
"""
course_listings = self.dashboard_page.get_course_listings()
self.assertEqual(len(course_listings), 1)
self.dashboard_page.a11y_audit.config.set_rules({
"ignore": [
'skip-link', # TODO: AC-179
'link-href', # TODO: AC-238, AC-179
],
})
self.dashboard_page.a11y_audit.check_for_accessibility_errors()
|
IndonesiaX/edx-platform
|
common/test/acceptance/tests/lms/test_lms_dashboard.py
|
Python
|
agpl-3.0
| 10,275
|
[
"VisIt"
] |
9e9de3fad4ffda1066e3d168a52d15d0f9a36d8c1d3a5a1cd0e001c2224286f9
|
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Constant(object):
"""The constant variables for signature."""
AUTHORIZATION = "Authorization"
HOST = "Host"
SECRET_KEY_ID = "X-Xiaomi-Secret-Key-Id"
TIMESTAMP = "X-Xiaomi-Timestamp"
CONTENT_MD5 = "X-Xiaomi-Content-MD5"
AUTHORIZATION_PREFIX = "Galaxy V3 "
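# A hypothetical usage sketch (not part of the SDK itself): these names are
# meant to be used as HTTP header keys when signing a request. The host,
# timestamp and signature values below are placeholders.
#
# headers = {Constant.HOST: "cloudml.api.example.com",
#            Constant.TIMESTAMP: str(int(time.time())),
#            Constant.AUTHORIZATION: Constant.AUTHORIZATION_PREFIX + signature}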
|
dlzhangxg/cloud-ml-sdk
|
cloud_ml_common/cloud_ml_common/auth/constant.py
|
Python
|
apache-2.0
| 854
|
[
"Galaxy"
] |
bb3a4447ad276db053156ef6665e06d610c6f15f4b7bab6ab697e2e9feada578
|
#!/usr/bin/env python
"""
Ban one or more Storage Elements for usage
Example:
$ dirac-admin-ban-se M3PEC-disk
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
read = True
write = True
check = True
remove = True
sites = []
mute = False
Script.registerSwitch("r", "BanRead", " Ban only reading from the storage element")
Script.registerSwitch("w", "BanWrite", " Ban writing to the storage element")
Script.registerSwitch("k", "BanCheck", " Ban check access to the storage element")
Script.registerSwitch("v", "BanRemove", " Ban remove access to the storage element")
Script.registerSwitch("a", "All", " Ban all access to the storage element")
Script.registerSwitch("m", "Mute", " Do not send email")
Script.registerSwitch(
"S:", "Site=", " Ban all SEs associate to site (note that if writing is allowed, check is always allowed)"
)
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(["seGroupList: list of SEs or comma-separated SEs"])
switches, ses = Script.parseCommandLine(ignoreErrors=True)
for switch in switches:
if switch[0].lower() in ("r", "banread"):
write = False
check = False
remove = False
if switch[0].lower() in ("w", "banwrite"):
read = False
check = False
remove = False
if switch[0].lower() in ("k", "bancheck"):
read = False
write = False
remove = False
if switch[0].lower() in ("v", "banremove"):
read = False
write = False
check = False
if switch[0].lower() in ("a", "all"):
pass
if switch[0].lower() in ("m", "mute"):
mute = True
if switch[0].lower() in ("s", "site"):
sites = switch[1].split(",")
# from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC import gConfig, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import resolveSEGroup, DMSHelpers
ses = resolveSEGroup(ses)
diracAdmin = DiracAdmin()
setup = gConfig.getValue("/DIRAC/Setup", "")
if not setup:
print("ERROR: Could not contact Configuration Service")
DIRAC.exit(2)
res = getProxyInfo()
if not res["OK"]:
gLogger.error("Failed to get proxy information", res["Message"])
DIRAC.exit(2)
userName = res["Value"].get("username")
if not userName:
gLogger.error("Failed to get username for proxy")
DIRAC.exit(2)
for site in sites:
res = DMSHelpers().getSEsForSite(site)
if not res["OK"]:
gLogger.error(res["Message"], site)
DIRAC.exit(-1)
ses.extend(res["Value"])
if not ses:
gLogger.error("There were no SEs provided")
DIRAC.exit(-1)
readBanned = []
writeBanned = []
checkBanned = []
removeBanned = []
resourceStatus = ResourceStatus()
res = resourceStatus.getElementStatus(ses, "StorageElement")
if not res["OK"]:
gLogger.error("Storage Element %s does not exist" % ses)
DIRAC.exit(-1)
reason = "Forced with dirac-admin-ban-se by %s" % userName
for se, seOptions in res["Value"].items():
resW = resC = resR = {"OK": False}
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if read and "ReadAccess" in seOptions:
if seOptions["ReadAccess"] == "Banned":
gLogger.notice("Read access already banned", se)
resR["OK"] = True
elif not seOptions["ReadAccess"] in ["Active", "Degraded", "Probing"]:
gLogger.notice(
"Read option for %s is %s, instead of %s"
% (se, seOptions["ReadAccess"], ["Active", "Degraded", "Probing"])
)
gLogger.notice("Try specifying the command switches")
else:
resR = resourceStatus.setElementStatus(se, "StorageElement", "ReadAccess", "Banned", reason, userName)
# res = csAPI.setOption( "%s/%s/ReadAccess" % ( storageCFGBase, se ), "InActive" )
if not resR["OK"]:
gLogger.error("Failed to update %s read access to Banned" % se)
else:
gLogger.notice("Successfully updated %s read access to Banned" % se)
readBanned.append(se)
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if write and "WriteAccess" in seOptions:
if seOptions["WriteAccess"] == "Banned":
gLogger.notice("Write access already banned", se)
resW["OK"] = True
elif not seOptions["WriteAccess"] in ["Active", "Degraded", "Probing"]:
gLogger.notice(
"Write option for %s is %s, instead of %s"
% (se, seOptions["WriteAccess"], ["Active", "Degraded", "Probing"])
)
gLogger.notice("Try specifying the command switches")
else:
resW = resourceStatus.setElementStatus(se, "StorageElement", "WriteAccess", "Banned", reason, userName)
# res = csAPI.setOption( "%s/%s/WriteAccess" % ( storageCFGBase, se ), "InActive" )
if not resW["OK"]:
gLogger.error("Failed to update %s write access to Banned" % se)
else:
gLogger.notice("Successfully updated %s write access to Banned" % se)
writeBanned.append(se)
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if check and "CheckAccess" in seOptions:
if seOptions["CheckAccess"] == "Banned":
gLogger.notice("Check access already banned", se)
resC["OK"] = True
elif not seOptions["CheckAccess"] in ["Active", "Degraded", "Probing"]:
gLogger.notice(
"Check option for %s is %s, instead of %s"
% (se, seOptions["CheckAccess"], ["Active", "Degraded", "Probing"])
)
gLogger.notice("Try specifying the command switches")
else:
resC = resourceStatus.setElementStatus(se, "StorageElement", "CheckAccess", "Banned", reason, userName)
# res = csAPI.setOption( "%s/%s/CheckAccess" % ( storageCFGBase, se ), "InActive" )
if not resC["OK"]:
gLogger.error("Failed to update %s check access to Banned" % se)
else:
gLogger.notice("Successfully updated %s check access to Banned" % se)
checkBanned.append(se)
# Eventually, we will get rid of the notion of InActive, as we always write Banned.
if remove and "RemoveAccess" in seOptions:
if seOptions["RemoveAccess"] == "Banned":
gLogger.notice("Remove access already banned", se)
resC["OK"] = True
elif not seOptions["RemoveAccess"] in ["Active", "Degraded", "Probing"]:
gLogger.notice(
"Remove option for %s is %s, instead of %s"
% (se, seOptions["RemoveAccess"], ["Active", "Degraded", "Probing"])
)
gLogger.notice("Try specifying the command switches")
else:
resC = resourceStatus.setElementStatus(se, "StorageElement", "RemoveAccess", "Banned", reason, userName)
# res = csAPI.setOption( "%s/%s/CheckAccess" % ( storageCFGBase, se ), "InActive" )
if not resC["OK"]:
gLogger.error("Failed to update %s remove access to Banned" % se)
else:
gLogger.notice("Successfully updated %s remove access to Banned" % se)
removeBanned.append(se)
if not (resR["OK"] or resW["OK"] or resC["OK"]):
DIRAC.exit(-1)
if not (writeBanned or readBanned or checkBanned or removeBanned):
gLogger.notice("No storage elements were banned")
DIRAC.exit(-1)
if mute:
gLogger.notice("Email is muted by script switch")
DIRAC.exit(0)
subject = "%s storage elements banned for use" % len(writeBanned + readBanned + checkBanned + removeBanned)
addressPath = "EMail/Production"
address = Operations().getValue(addressPath, "")
body = ""
if read:
body = "%s\n\nThe following storage elements were banned for reading:" % body
for se in readBanned:
body = "%s\n%s" % (body, se)
if write:
body = "%s\n\nThe following storage elements were banned for writing:" % body
for se in writeBanned:
body = "%s\n%s" % (body, se)
if check:
body = "%s\n\nThe following storage elements were banned for check access:" % body
for se in checkBanned:
body = "%s\n%s" % (body, se)
if remove:
body = "%s\n\nThe following storage elements were banned for remove access:" % body
for se in removeBanned:
body = "%s\n%s" % (body, se)
if not address:
gLogger.notice("'%s' not defined in Operations, can not send Mail\n" % addressPath, body)
DIRAC.exit(0)
res = diracAdmin.sendMail(address, subject, body)
gLogger.notice("Notifying %s" % address)
if res["OK"]:
gLogger.notice(res["Value"])
else:
gLogger.notice(res["Message"])
DIRAC.exit(0)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_admin_ban_se.py
|
Python
|
gpl-3.0
| 10,005
|
[
"DIRAC"
] |
b8035ba4fa1124fd64247056e09189040caf9947db2c99a7a9ee3d5bd16b769e
|
#!/usr/bin/env python
# prepare configuration files for Circos!
import sys
import time
import optparse
import general
import numpy
import pickle
import pdb
import metrn
import fasta
import modencode
import os
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define a function that converts a local user path to SCG3 or GS server paths """
def serverPath(inpath, server="ON"):
if server == "ON":
return inpath.replace("/Users/claraya/", "/srv/gs1/projects/snyder/claraya/")
elif server == "GS":
return inpath.replace("/Users/claraya/", "/net/fields/vol1/home/araya/")
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action = "store", type = "string", dest = "path", help = "Path from script to files")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Model organism targeted for analysis")
parser.add_option("--infile", action = "store", type = "string", dest = "infile", help = "Input file (with path)")
parser.add_option("--mode", action = "store", type = "string", dest = "mode", help = "Type of operations to be performed: karyotype, import, extend...")
parser.add_option("--name", action = "store", type = "string", dest = "name", help = "Name of Circos visualization", default="OFF")
parser.add_option("--track", action = "store", type = "string", dest = "track", help = "Name of Circos data track", default="OFF")
parser.add_option("--nuclear", action = "store", type = "string", dest = "nuclear", help = "Peaks are only nuclear?", default="ON")
parser.add_option("--color", action = "store", type = "string", dest = "color", help = "Color for the karyotype track", default="red")
parser.add_option("--scale", action = "store", type = "string", dest = "scale", help = "Scale for chromosome labeling", default="1000000")
parser.add_option("--min", action = "store", type = "string", dest = "min", help = "Min value on track", default="1")
parser.add_option("--max", action = "store", type = "string", dest = "max", help = "Max value on track", default="50")
parser.add_option("--hi", action = "store", type = "string", dest = "hi", help = "Hi-point of track", default="0.99")
parser.add_option("--lo", action = "store", type = "string", dest = "lo", help = "Lo-point of track", default="0.50")
parser.add_option("--fillcolor", action = "store", type = "string", dest = "fillcolor", help = "Fill color for the karyotype track", default="optred")
parser.add_option("--fillunder", action = "store", type = "string", dest = "fillunder", help = "Fill under the line?", default="yes")
parser.add_option("--parameters", action = "store", type = "string", dest = "parameters", help = "Variable parameters...", default="")
parser.add_option("--threads", action = "store", type = "int", dest = "threads", help = "Parallel processing threads", default=1)
parser.add_option("--chunks", action = "store", type = "int", dest = "chunks", help = "", default=100)
parser.add_option("--module", action = "store", type = "string", dest = "module", help = "", default="md1")
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "Qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "Job name for cluster", default="OFF")
parser.add_option("--copy", action = "store", type = "string", dest = "copy", help = "Copy simulated peaks to analysis folder?", default="OFF")
parser.add_option("--tag", action = "store", type = "string", dest = "tag", help = "Add tag to TFBS?", default="")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
elif option.server == "GS":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_nexus.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
qsubpath = path_dict["qsub"]
circospath = path_dict["circos"]
# standardize paths for analysis:
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
#organismIGV = "ce6"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
#organismIGV = "ce6"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
#organismIGV = "ce6"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
#organismIGV = "dm5"
# specify genome size file:
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
# define organism-specific context and identifier columns:
if option.organism == "h.sapiens" or option.organism == "human" or option.organism == "hs":
contextTag = "cells"
idColumns = ["name", "code", "hgcn","ensembl"]
idComplexList = list()
elif option.organism == "m.musculus" or option.organism == "mouse" or option.organism == "mm":
contextTag = "cells"
idColumns = ["name", "code", "hgcn","ensembl"]
idComplexList = list()
elif option.organism == "c.elegans" or option.organism == "worm" or option.organism == "ce":
contextTag = "stage"
idColumns = ["name", "code", "wormbase","ensembl"]
idComplexList = list()
elif option.organism == "d.melanogaster" or option.organism == "fly" or option.organism == "dm":
contextTag = "stage"
idColumns = ["name", "code", "flybase","ensembl"]
idComplexList = list()
# prepare karyotype file:
if option.mode == "karyotype":
# generate Circos path:
general.pathGenerator(circospath)
# setup output file:
f_output = open(circospath + "mapcircos_" + organismTag +"_karyotype.txt", "w")
index = 1
print
print "Generating karyotype file..."
for chrm in chromosomes:
#chr - hs1 1 0 249250621 chr1
print >>f_output, " ".join(["chr", "-", organismTag + str(index), chrm, "0", str(genome_size_dict[chrm]), "chr" + str(index)])
index += 1
print "Karyotype: ", len(chromosomes), "chromosomes"
print
# close output files:
f_output.close()
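# Illustrative note (added for clarity, not in the original script): each line
# written above follows the Circos karyotype format, e.g. for C. elegans
#   chr - ce1 <chrm-name> 0 <chrm-size> chr1
# where <chrm-name> and <chrm-size> come from the configured chromosome list and
# genome size file.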
# import bed features into circos format:
if option.mode == "import":
print
print "Loading chromosomes..."
index, chrm_dict = 1, dict()
for chrm in chromosomes:
chrm_dict[chrm.upper()] = str(index)
index += 1
# determine output name:
if option.name == "OFF":
filename = option.infile.split("/")
filename = filename[len(filename)-1]
option.name = filename.replace(".bed","").replace(".txt","")
# remove previous outputs:
#command = "rm -rf " + circospath + option.name
#os.system(command)
# prepare output path:
analysispath = circospath + option.name + "/"
datapath = circospath + option.name + "/data/"
etcpath = circospath + option.name + "/etc/"
general.pathGenerator(analysispath)
general.pathGenerator(datapath)
general.pathGenerator(etcpath)
# copy circos templates:
command = "cp -r " + circospath + "templates/* " + circospath + option.name
os.system(command)
command = "cp " + circospath + "mapcircos_" + organismTag +"_karyotype.txt " + datapath + "mapcircos_karyotype.txt"
os.system(command)
# replace color line:
command = "cp " + circospath + "templates/etc/colors-" + organismTag + ".conf " + etcpath + "colors.conf"
os.system(command)
# mess with configuration file:
configuration = open(circospath + "templates/circos.conf").read()
configuration = configuration.replace("chromosomes_units = 1000000", "chromosomes_units = " + option.scale)
configuration = configuration.replace("min = 1", "min = " + option.min)
configuration = configuration.replace("max = 50", "max = " + option.max)
configuration = configuration.replace("r0 = 0.50r", "r0 = " + option.hi + "r")
configuration = configuration.replace("r1 = 0.99r", "r1 = " + option.lo + "r")
configuration = configuration.replace("color = red", "color = " + option.color)
configuration = configuration.replace("fill_color = optred", "fill_color = " + option.fillcolor)
configuration = configuration.replace("fill_under = yes", "fill_under = " + option.fillunder)
c_output = open(analysispath + "circos.conf", "w")
print >>c_output, configuration
c_output.close()
# setup output file:
f_output = open(datapath + "mapcircos_data.txt", "w")
# import the features:
print "Creating Circos data file..."
inlines = open(option.infile).readlines()
inlines.pop(0)
for inline in inlines:
chrm, start, stop, feature, score, strand = inline.strip().split("\t")[:6]
if chrm in chromosomes:
chrm = organismTag + chrm_dict[chrm.upper()]
print >>f_output, " ".join([chrm, start, stop, score])
# close output files:
f_output.close()
print
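# Illustrative note (added for clarity, not in the original script): each data
# line written above has the space-separated Circos form
#   <organismTag><index> <start> <stop> <score>
# e.g. a peak on the first chromosome might appear as "ce1 10500 11200 7.5";
# the coordinates and score here are hypothetical.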
# extend bed features into circos format:
if option.mode == "extend":
# determine track name:
if option.track == "OFF":
trackname = option.infile.split("/")
trackname = trackname[len(trackname)-1]
option.track = trackname.replace(".bed","").replace(".txt","")
option.track = "mapcircos_track_" + option.track + ".txt"
print
print "Track:", option.track
# load chromosomes:
index, chrm_dict = 1, dict()
for chrm in chromosomes:
chrm_dict[chrm.upper()] = str(index)
index += 1
# load output path:
analysispath = circospath + option.name + "/"
datapath = circospath + option.name + "/data/"
etcpath = circospath + option.name + "/etc/"
# load configuration segment:
configuration = open(circospath + "templates/circos.conf").read()
configuration = configuration.split("<plots>")[1].split("</plots>")[0]
configuration = configuration.replace("min = 1", "min = " + option.min)
configuration = configuration.replace("max = 50", "max = " + option.max)
configuration = configuration.replace("r0 = 0.50r", "r0 = " + option.hi + "r")
configuration = configuration.replace("r1 = 0.99r", "r1 = " + option.lo + "r")
configuration = configuration.replace("color = red", "color = " + option.color)
configuration = configuration.replace("fill_color = optred", "fill_color = " + option.fillcolor)
configuration = configuration.replace("fill_under = yes", "fill_under = " + option.fillunder)
configuration = configuration.replace("file = data/mapcircos_data.txt", "file = data/" + option.track)
# update configuration file:
print "Updating configuration..."
updated = open(analysispath + "circos.conf").read()
updated = updated.replace("</plots>", configuration + "</plots>")
c_output = open(analysispath + "circos.conf", "w")
print >>c_output, updated
c_output.close()
# setup output file:
f_output = open(datapath + option.track, "w")
# import the features:
print "Creating track data file..."
inlines = open(option.infile).readlines()
inlines.pop(0)
for inline in inlines:
chrm, start, stop, feature, score, strand = inline.strip().split("\t")[:6]
if chrm in chromosomes:
chrm = organismTag + chrm_dict[chrm.upper()]
print >>f_output, " ".join([chrm, start, stop, score])
# close output files:
f_output.close()
print
# launch circos visualization mode:
if option.mode == "launch":
# load output path:
analysispath = circospath + option.name + "/"
# launch circos:
print
print "Generating:", option.name
os.chdir(analysispath)
command = "circos -conf circos.conf"
os.system(command)
print
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
#python mapCircos.py --path ~/meTRN --mode karyotype --organism ce
#python mapCircos.py --path ~/meTRN --mode import --organism ce --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_cx_occP05_any.bed --scale 100000 --hi 0.90 --lo 0.98 --color dgrey --fillcolor dgrey
#python mapCircos.py --path ~/meTRN --mode extend --organism ce --name maphot_ce_selection_reg_cx_occP05_any --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_ex_occP05_hot.bed --track ex --hi 0.80 --lo 0.88 --color optblue --fillcolor optblue
#python mapCircos.py --path ~/meTRN --mode extend --organism ce --name maphot_ce_selection_reg_cx_occP05_any --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_l1_occP05_hot.bed --track l1 --hi 0.70 --lo 0.78 --color optgreen --fillcolor optgreen
#python mapCircos.py --path ~/meTRN --mode extend --organism ce --name maphot_ce_selection_reg_cx_occP05_any --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_l2_occP05_hot.bed --track l2 --hi 0.60 --lo 0.68 --color optyellow --fillcolor optyellow
#python mapCircos.py --path ~/meTRN --mode extend --organism ce --name maphot_ce_selection_reg_cx_occP05_any --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_l3_occP05_hot.bed --track l3 --hi 0.50 --lo 0.58 --color optorange --fillcolor optorange
#python mapCircos.py --path ~/meTRN --mode extend --organism ce --name maphot_ce_selection_reg_cx_occP05_any --infile ~/meTRN/data/hot/regions/maphot_ce_selection_reg_l4_occP05_hot.bed --track l4 --hi 0.40 --lo 0.48 --color optred --fillcolor optred
#circos -conf ./circos.conf
|
claraya/meTRN
|
python/mapCircos.py
|
Python
|
mit
| 14,748
|
[
"BWA",
"Bowtie"
] |
859cc8e32720b970c1a8d8b62e8856989a3864941da3ecada414b094b7dee949
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
from __future__ import absolute_import
from __future__ import division
import base64
import json
import logging
import sys
import threading
import time
import traceback
import urllib
from builtins import hex
from collections import defaultdict
from future.utils import iteritems
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayData
from apache_beam.typehints import typehints
from apache_beam.utils import proto_utils
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.plugin import BeamPlugin
if sys.version_info[0] > 2:
unquote_to_bytes = urllib.parse.unquote_to_bytes
quote = urllib.parse.quote
else:
unquote_to_bytes = urllib.unquote # pylint: disable=deprecated-urllib-function
quote = urllib.quote # pylint: disable=deprecated-urllib-function
__all__ = ['DataflowRunner']
_LOGGER = logging.getLogger(__name__)
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method will submit an independent job for
remote execution that consists of the nodes reachable from the passed in
node argument or entire graph if node is None. The run() method returns
after the service created the job and will not wait for the job to finish
if blocking is set to False.
"""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DataflowRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
# Imported here to avoid circular dependencies.
# TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import ReadPTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import JrhReadPTransformOverride
_PTRANSFORM_OVERRIDES = [
]
_JRH_PTRANSFORM_OVERRIDES = [
JrhReadPTransformOverride(),
]
# These overrides should be applied after the proto representation of the
# graph is created.
_NON_PORTABLE_PTRANSFORM_OVERRIDES = [
CreatePTransformOverride(),
ReadPTransformOverride(),
]
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
def is_fnapi_compatible(self):
return False
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
@staticmethod
def poll_for_job_completion(runner, result, duration):
"""Polls for the specified job to finish running (successfully or not).
Updates the result with the new job information before returning.
Args:
runner: DataflowRunner instance to use for polling job state.
result: DataflowPipelineResult instance used for job information.
duration (int): The time to wait (in milliseconds) for job to finish.
If it is set to :data:`None`, it will wait indefinitely until the job
is finished.
"""
last_message_time = None
current_seen_messages = set()
last_error_rank = float('-inf')
last_error_msg = None
last_job_state = None
# How long to wait after pipeline failure for the error
# message to show up giving the reason for the failure.
# It typically takes about 30 seconds.
final_countdown_timer_secs = 50.0
sleep_secs = 5.0
# Try to prioritize the user-level traceback, if any.
def rank_error(msg):
if 'work item was attempted' in msg:
return -1
elif 'Traceback' in msg:
return 1
return 0
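# Illustrative ranking (derived from the checks above; example messages are
# hypothetical):
#   rank_error('Traceback (most recent call last): ...')  -> 1
#   rank_error('Some other error message')                -> 0
#   rank_error('A work item was attempted 4 times ...')   -> -1
# so user-level tracebacks are preferred when reporting the failure reason.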
if duration:
start_secs = time.time()
duration_secs = duration // 1000
job_id = result.job_id()
while True:
response = runner.dataflow_client.get_job(job_id)
# If get() is called very soon after Create() the response may not contain
# an initialized 'currentState' field.
if response.currentState is not None:
if response.currentState != last_job_state:
_LOGGER.info('Job %s is in state %s', job_id, response.currentState)
last_job_state = response.currentState
if str(response.currentState) != 'JOB_STATE_RUNNING':
# Stop checking for new messages on timeout, explanatory
# message received, success, or a terminal job state caused
# by the user that therefore doesn't require explanation.
if (final_countdown_timer_secs <= 0.0
or last_error_msg is not None
or str(response.currentState) == 'JOB_STATE_DONE'
or str(response.currentState) == 'JOB_STATE_CANCELLED'
or str(response.currentState) == 'JOB_STATE_UPDATED'
or str(response.currentState) == 'JOB_STATE_DRAINED'):
break
# Check that job is in a post-preparation state before starting the
# final countdown.
if (str(response.currentState) not in (
'JOB_STATE_PENDING', 'JOB_STATE_QUEUED')):
# The job has failed; ensure we see any final error messages.
sleep_secs = 1.0 # poll faster during the final countdown
final_countdown_timer_secs -= sleep_secs
time.sleep(sleep_secs)
# Get all messages since beginning of the job run or since last message.
page_token = None
while True:
messages, page_token = runner.dataflow_client.list_messages(
job_id, page_token=page_token, start_time=last_message_time)
for m in messages:
message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
if not last_message_time or m.time > last_message_time:
last_message_time = m.time
current_seen_messages = set()
if message in current_seen_messages:
# Skip the message if it has already been seen at the current
# time. This could be the case since the list_messages API is
# queried starting at last_message_time.
continue
else:
current_seen_messages.add(message)
# Skip empty messages.
if m.messageImportance is None:
continue
_LOGGER.info(message)
if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
if rank_error(m.messageText) >= last_error_rank:
last_error_rank = rank_error(m.messageText)
last_error_msg = m.messageText
if not page_token:
break
if duration:
passed_secs = time.time() - start_secs
if passed_secs > duration_secs:
_LOGGER.warning('Timing out on waiting for job %s after %d seconds',
job_id, passed_secs)
break
result._job = response
runner.last_error_msg = last_error_msg
@staticmethod
def group_by_key_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class GroupByKeyInputVisitor(PipelineVisitor):
"""A visitor that replaces `Any` element type for input `PCollection` of
a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.
TODO(BEAM-115): Once Python SDk is compatible with the new Runner API,
we could directly replace the coder instead of mutating the element type.
"""
def enter_composite_transform(self, transform_node):
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
pcoll = transform_node.inputs[0]
pcoll.element_type = typehints.coerce_to_kv_type(
pcoll.element_type, transform_node.full_label)
key_type, value_type = pcoll.element_type.tuple_types
if transform_node.outputs:
from apache_beam.runners.portability.fn_api_runner_transforms import \
only_element
key = (
None if None in transform_node.outputs.keys()
else only_element(transform_node.outputs.keys()))
transform_node.outputs[key].element_type = typehints.KV[
key_type, typehints.Iterable[value_type]]
return GroupByKeyInputVisitor()
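# Illustrative application (mirrors run_pipeline below): the visitor is applied
# to the pipeline graph before translation so GroupByKey inputs carry KV types:
#   pipeline.visit(DataflowRunner.group_by_key_input_visitor())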
@staticmethod
def _set_pdone_visitor(pipeline):
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class SetPDoneVisitor(PipelineVisitor):
def __init__(self, pipeline):
self._pipeline = pipeline
@staticmethod
def _maybe_fix_output(transform_node, pipeline):
if not transform_node.outputs:
pval = pvalue.PDone(pipeline)
pval.producer = transform_node
transform_node.outputs = {None: pval}
def enter_composite_transform(self, transform_node):
SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)
def visit_transform(self, transform_node):
SetPDoneVisitor._maybe_fix_output(transform_node, self._pipeline)
return SetPDoneVisitor(pipeline)
@staticmethod
def side_input_visitor():
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
from apache_beam.transforms.core import ParDo
class SideInputVisitor(PipelineVisitor):
"""Ensures input `PCollection` used as a side inputs has a `KV` type.
TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
we could directly replace the coder instead of mutating the element type.
"""
def visit_transform(self, transform_node):
if isinstance(transform_node.transform, ParDo):
new_side_inputs = []
for ix, side_input in enumerate(transform_node.side_inputs):
access_pattern = side_input._side_input_data().access_pattern
if access_pattern == common_urns.side_inputs.ITERABLE.urn:
# Add a map to ('', value) as Dataflow currently only handles
# keyed side inputs.
pipeline = side_input.pvalue.pipeline
new_side_input = _DataflowIterableSideInput(side_input)
new_side_input.pvalue = beam.pvalue.PCollection(
pipeline,
element_type=typehints.KV[
bytes, side_input.pvalue.element_type],
is_bounded=side_input.pvalue.is_bounded)
parent = transform_node.parent or pipeline._root_transform()
map_to_void_key = beam.pipeline.AppliedPTransform(
pipeline,
beam.Map(lambda x: (b'', x)),
transform_node.full_label + '/MapToVoidKey%s' % ix,
(side_input.pvalue,))
new_side_input.pvalue.producer = map_to_void_key
map_to_void_key.add_output(new_side_input.pvalue)
parent.add_part(map_to_void_key)
elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
# Ensure the input coder is a KV coder and patch up the
# access pattern to appease Dataflow.
side_input.pvalue.element_type = typehints.coerce_to_kv_type(
side_input.pvalue.element_type, transform_node.full_label)
new_side_input = _DataflowMultimapSideInput(side_input)
else:
raise ValueError(
'Unsupported access pattern for %r: %r' %
(transform_node.full_label, access_pattern))
new_side_inputs.append(new_side_input)
transform_node.side_inputs = new_side_inputs
transform_node.transform.side_inputs = new_side_inputs
return SideInputVisitor()
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
"""A visitor that replaces the element type for input ``PCollections``s of
a ``Flatten`` transform with that of the output ``PCollection``.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = transform_node.outputs[None]
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
def run_pipeline(self, pipeline, options):
"""Remotely executes entire pipeline or parts reachable from node."""
# Label goog-dataflow-notebook if job is started from notebook.
if is_in_notebook():
notebook_version = ('goog-dataflow-notebook=' +
beam.version.__version__.replace('.', '_'))
if options.view_as(GoogleCloudOptions).labels:
options.view_as(GoogleCloudOptions).labels.append(notebook_version)
else:
options.view_as(GoogleCloudOptions).labels = [notebook_version]
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
# Convert all side inputs into a form acceptable to Dataflow.
if apiclient._use_fnapi(options):
pipeline.visit(self.side_input_visitor())
# Performing configured PTransform overrides. Note that this is currently
# done before Runner API serialization, since the new proto needs to contain
# any added PTransforms.
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)
if (apiclient._use_fnapi(options)
and not apiclient._use_unified_worker(options)):
pipeline.replace_all(DataflowRunner._JRH_PTRANSFORM_OVERRIDES)
use_fnapi = apiclient._use_fnapi(options)
from apache_beam.transforms import environments
default_environment = environments.DockerEnvironment(
container_image=apiclient.get_container_image_from_options(options))
# Snapshot the pipeline in a portable proto.
self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
return_context=True, default_environment=default_environment)
if use_fnapi:
# Cross language transform require using a pipeline object constructed
# from the full pipeline proto to make sure that expanded version of
# external transforms are reflected in the Pipeline job graph.
from apache_beam import Pipeline
pipeline = Pipeline.from_runner_api(
self.proto_pipeline, pipeline.runner, options,
allow_proto_holders=True)
# Pipelines generated from proto do not have output set to PDone set for
# leaf elements.
pipeline.visit(self._set_pdone_visitor(pipeline))
# We need to generate a new context that maps to the new pipeline object.
self.proto_pipeline, self.proto_context = pipeline.to_runner_api(
return_context=True, default_environment=default_environment)
else:
# Performing configured PTransform overrides which should not be reflected
# in the proto representation of the graph.
pipeline.replace_all(DataflowRunner._NON_PORTABLE_PTRANSFORM_OVERRIDES)
# Add setup_options for all the BeamPlugin imports
setup_options = options.view_as(SetupOptions)
plugins = BeamPlugin.get_all_plugin_paths()
if setup_options.beam_plugins is not None:
plugins = list(set(plugins + setup_options.beam_plugins))
setup_options.beam_plugins = plugins
# Elevate "min_cpu_platform" to pipeline option, but using the existing
# experiment.
debug_options = options.view_as(DebugOptions)
worker_options = options.view_as(WorkerOptions)
if worker_options.min_cpu_platform:
debug_options.add_experiment('min_cpu_platform=' +
worker_options.min_cpu_platform)
# Elevate "enable_streaming_engine" to pipeline option, but using the
# existing experiment.
google_cloud_options = options.view_as(GoogleCloudOptions)
if google_cloud_options.enable_streaming_engine:
debug_options.add_experiment("enable_windmill_service")
debug_options.add_experiment("enable_streaming_engine")
else:
if (debug_options.lookup_experiment("enable_windmill_service") or
debug_options.lookup_experiment("enable_streaming_engine")):
raise ValueError("""Streaming engine both disabled and enabled:
enable_streaming_engine flag is not set, but enable_windmill_service
and/or enable_streaming_engine experiments are present.
It is recommended you only set the enable_streaming_engine flag.""")
dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
if dataflow_worker_jar is not None:
if not apiclient._use_fnapi(options):
_LOGGER.warning(
'Typical end users should not use this worker jar feature. '
'It can only be used when FnAPI is enabled.')
else:
debug_options.add_experiment('use_staged_dataflow_worker_jar')
# Make Dataflow workers use FastAvro on Python 3 unless use_avro experiment
# is set. Note that use_avro is only interpreted by the Dataflow runner
# at job submission and is not interpreted by the Dataflow service or workers,
# which by default use the avro library unless the use_fastavro experiment is set.
if sys.version_info[0] > 2 and (
not debug_options.lookup_experiment('use_avro')):
debug_options.add_experiment('use_fastavro')
self.job = apiclient.Job(options, self.proto_pipeline)
# Dataflow runner requires a KV type for GBK inputs, hence we enforce that
# here.
pipeline.visit(self.group_by_key_input_visitor())
# Dataflow runner requires output type of the Flatten to be the same as the
# inputs, hence we enforce that here.
pipeline.visit(self.flatten_input_visitor())
# Trigger a traversal of all reachable nodes.
self.visit_transforms(pipeline, options)
test_options = options.view_as(TestOptions)
# If it is a dry run, return without submitting the job.
if test_options.dry_run:
return None
# Get a Dataflow API client and set its options
self.dataflow_client = apiclient.DataflowApplicationClient(options)
# Create the job description and send a request to the service. The result
# can be None if there is no need to send a request to the service (e.g.
# template creation). If a request was sent and failed then the call will
# raise an exception.
result = DataflowPipelineResult(
self.dataflow_client.create_job(self.job), self)
# TODO(BEAM-4274): Circular import runners-metrics. Requires refactoring.
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
result.metric_results = self._metrics
return result
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(
self._get_coder(typehint, window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint),
window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder, unused=None):
"""Returns an encoding based on a coder object."""
if not isinstance(coder, coders.Coder):
raise TypeError('Coder object must inherit from coders.Coder: %s.' %
str(coder))
return coder.as_cloud_object(self.proto_context.coders)
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': 'kind:stream',
'component_encodings': [input_encoding],
'is_stream_like': {
'value': True
},
}
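# Illustrative result (derived from the dict above): for an input encoding E the
# returned side-input encoding is
#   {'@type': 'kind:stream',
#    'component_encodings': [E],
#    'is_stream_like': {'value': True}}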
def _get_encoded_output_coder(self, transform_node, window_value=True):
"""Returns the cloud encoding of the coder for the output of a transform."""
from apache_beam.runners.portability.fn_api_runner_transforms import \
only_element
if len(transform_node.outputs) == 1:
output_tag = only_element(transform_node.outputs.keys())
# TODO(robertwb): Handle type hints for multi-output transforms.
element_type = transform_node.outputs[output_tag].element_type
else:
# TODO(silviuc): Remove this branch (and assert) when typehints are
# propagated everywhere. Returning an 'Any' as type hint will trigger
# usage of the fallback coder (i.e., cPickler).
element_type = typehints.Any
if window_value:
# All outputs have the same windowing. So getting the coder from an
# arbitrary window is fine.
output_tag = next(iter(transform_node.outputs.keys()))
window_coder = (
transform_node.outputs[
output_tag].windowing.windowfn.get_window_coder())
else:
window_coder = None
return self._get_typehint_based_encoding(element_type, window_coder)
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
"""Creates a Step object and adds it to the cache."""
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(step_kind, self._get_unique_step_name())
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, step_label)
# Cache the node/step association for the main output of the transform node.
# The main output key of external transforms can be ambiguous, so we only cache
# the output under a tag when there is exactly one tag; otherwise the tag is None.
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
output_tag = (only_element(transform_node.outputs.keys())
if len(transform_node.outputs.keys()) == 1 else None)
self._cache.cache_output(transform_node, output_tag, step)
# If side_tags is not () then this is a multi-output transform node and we
# need to cache the (node, tag, step) for each of the tags used to access
# the outputs. This is essential because the keys used to search in the
# cache always contain the tag.
for tag in side_tags:
self._cache.cache_output(transform_node, tag, step)
# Finally, we add the display data items to the pipeline step.
# If the transform contains no display data then an empty list is added.
step.add_property(
PropertyNames.DISPLAY_DATA,
[item.get_dict() for item in
DisplayData.create_from(transform_node.transform).items])
return step
def _add_singleton_step(
self, label, full_label, tag, input_step, windowing_strategy,
access_pattern):
"""Creates a CollectionToSingleton step used to handle ParDo side inputs."""
# Import here to avoid adding the dependency for local running scenarios.
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, full_label)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(tag)})
step.encoding = self._get_side_input_encoding(input_step.encoding)
output_info = {
PropertyNames.USER_NAME: '%s.%s' % (full_label, PropertyNames.OUTPUT),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}
if common_urns.side_inputs.MULTIMAP.urn == access_pattern:
output_info[PropertyNames.USE_INDEXED_FORMAT] = True
step.add_property(PropertyNames.OUTPUT_INFO, [output_info])
step.add_property(
PropertyNames.WINDOWING_STRATEGY,
self.serialize_windowing_strategy(windowing_strategy))
return step
def run_Impulse(self, transform_node, options):
standard_options = options.view_as(StandardOptions)
debug_options = options.view_as(DebugOptions)
use_fn_api = (debug_options.experiments and
'beam_fn_api' in debug_options.experiments)
use_streaming_engine = (
debug_options.experiments and
'enable_streaming_engine' in debug_options.experiments and
'enable_windmill_service' in debug_options.experiments)
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
if (standard_options.streaming and
(not use_fn_api or not use_streaming_engine)):
step.add_property(PropertyNames.FORMAT, 'pubsub')
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
else:
step.add_property(PropertyNames.FORMAT, 'impulse')
encoded_impulse_element = coders.WindowedValueCoder(
coders.BytesCoder(),
coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
window.GlobalWindows.windowed_value(b''))
if use_fn_api:
encoded_impulse_as_str = self.byte_array_to_json_string(
encoded_impulse_element)
else:
encoded_impulse_as_str = base64.b64encode(
encoded_impulse_element).decode('ascii')
step.add_property(PropertyNames.IMPULSE_ELEMENT,
encoded_impulse_as_str)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (
transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run_Flatten(self, transform_node, options):
step = self._add_step(TransformNames.FLATTEN,
transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append(
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def apply_WriteToBigQuery(self, transform, pcoll, options):
# Make sure this is the WriteToBigQuery class that we expected, and that
# users did not specifically request the new BQ sink by passing experiment
# flag.
# TODO(BEAM-6928): Remove this function for release 2.14.0.
experiments = options.view_as(DebugOptions).experiments or []
if (not isinstance(transform, beam.io.WriteToBigQuery)
or 'use_beam_bq_sink' in experiments):
return self.apply_PTransform(transform, pcoll, options)
if transform.schema == beam.io.gcp.bigquery.SCHEMA_AUTODETECT:
raise RuntimeError(
'Schema auto-detection is not supported on the native sink')
standard_options = options.view_as(StandardOptions)
if standard_options.streaming:
if (transform.write_disposition ==
beam.io.BigQueryDisposition.WRITE_TRUNCATE):
raise RuntimeError('Can not use write truncation mode in streaming')
return self.apply_PTransform(transform, pcoll, options)
else:
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
schema = None
if transform.schema:
schema = parse_table_schema_from_json(json.dumps(transform.schema))
return pcoll | 'WriteToBigQuery' >> beam.io.Write(
beam.io.BigQuerySink(
transform.table_reference.tableId,
transform.table_reference.datasetId,
transform.table_reference.projectId,
schema,
transform.create_disposition,
transform.write_disposition,
kms_key=transform.kms_key))
def apply_GroupByKey(self, transform, pcoll, options):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError(('Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label,
coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
return pvalue.PCollection.from_(pcoll)
def run_GroupByKey(self, transform_node, options):
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.GROUP, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
windowing = transform_node.transform.get_windowing(
transform_node.inputs)
step.add_property(
PropertyNames.SERIALIZED_FN,
self.serialize_windowing_strategy(windowing))
def run_RunnerAPIPTransformHolder(self, transform_node, options):
"""Adding Dataflow runner job description for transform holder objects.
These holder transform objects are generated for some of the transforms that
become available after a cross-language transform expansion, usually if the
corresponding transform object cannot be generated in Python SDK (for
example, a python `ParDo` transform cannot be generated without a serialized
Python `DoFn` object).
"""
urn = transform_node.transform.proto().urn
assert urn
# TODO(chamikara): support other transforms that require holder objects in
# the Python SDK.
if common_urns.primitives.PAR_DO.urn == urn:
self.run_ParDo(transform_node, options)
else:
raise NotImplementedError(urn)
def run_ParDo(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Attach side inputs.
si_dict = {}
si_labels = {}
full_label_counts = defaultdict(int)
lookup_label = lambda side_pval: si_labels[side_pval]
named_inputs = transform_node.named_inputs()
label_renames = {}
for ix, side_pval in enumerate(transform_node.side_inputs):
assert isinstance(side_pval, AsSideInput)
step_name = 'SideInput-' + self._get_unique_step_name()
si_label = 'side%d-%s' % (ix, transform_node.full_label)
old_label = 'side%d' % ix
label_renames[old_label] = si_label
assert old_label in named_inputs
pcollection_label = '%s.%s' % (
side_pval.pvalue.producer.full_label.split('/')[-1],
side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
si_full_label = '%s/%s(%s.%s)' % (transform_node.full_label,
side_pval.__class__.__name__,
pcollection_label,
full_label_counts[pcollection_label])
# Count the number of times the same PCollection is a side input
# to the same ParDo.
full_label_counts[pcollection_label] += 1
self._add_singleton_step(
step_name, si_full_label, side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue),
side_pval.pvalue.windowing,
side_pval._side_input_data().access_pattern)
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: step_name,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
transform_name = transform_node.full_label.rsplit('/', 1)[-1]
step = self._add_step(
TransformNames.DO,
transform_node.full_label + (
'/{}'.format(transform_name)
if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
transform_proto = self.proto_context.transforms.get_proto(transform_node)
transform_id = self.proto_context.transforms.get_id(transform_node)
use_fnapi = apiclient._use_fnapi(options)
use_unified_worker = apiclient._use_unified_worker(options)
# The data transmitted in SERIALIZED_FN is different depending on whether
# this is a fnapi pipeline or not.
if (use_fnapi and
(transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn or
use_unified_worker)):
# Patch side input ids to be unique across a given pipeline.
if (label_renames and
transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn):
# Patch PTransform proto.
for old, new in iteritems(label_renames):
transform_proto.inputs[new] = transform_proto.inputs[old]
del transform_proto.inputs[old]
# Patch ParDo proto.
proto_type, _ = beam.PTransform._known_urns[transform_proto.spec.urn]
proto = proto_utils.parse_Bytes(transform_proto.spec.payload,
proto_type)
for old, new in iteritems(label_renames):
proto.side_inputs[new].CopyFrom(proto.side_inputs[old])
del proto.side_inputs[old]
transform_proto.spec.payload = proto.SerializeToString()
# We need to update the pipeline proto.
del self.proto_pipeline.components.transforms[transform_id]
(self.proto_pipeline.components.transforms[transform_id]
.CopyFrom(transform_proto))
serialized_data = transform_id
else:
serialized_data = pickler.dumps(
self._pardo_fn_data(transform_node, lookup_label))
step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
# TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
# step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'out' for main output and 'out_<tag>' for a tagged output.
# Using 'out' as a tag will not clash with the name for main since it will
# be transformed into 'out_out' internally.
outputs = []
step.encoding = self._get_encoded_output_coder(transform_node)
all_output_tags = list(transform_proto.outputs.keys())  # materialize so indexing works on Python 3
from apache_beam.transforms.core import RunnerAPIPTransformHolder
external_transform = isinstance(transform, RunnerAPIPTransformHolder)
# Some external transforms require output tags to not be modified.
# So we randomly select one of the output tags as the main output and
# leave others as side outputs. Transform execution should not change
# depending on which output tag we choose as the main output here.
# Also, some SDKs do not work correctly if output tags are modified. So for
# external transforms, we leave tags unmodified.
main_output_tag = (
all_output_tags[0] if external_transform else PropertyNames.OUT)
# Python SDK uses 'None' as the tag of the main output.
tag_to_ignore = main_output_tag if external_transform else 'None'
side_output_tags = set(all_output_tags).difference({tag_to_ignore})
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: main_output_tag})
for side_tag in side_output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: (
side_tag if external_transform
else '%s_%s' % (PropertyNames.OUT, side_tag))})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
# Add the restriction encoding if we are a splittable DoFn
# and are using the Fn API on the unified worker.
restriction_coder = transform.get_restriction_coder()
if restriction_coder:
step.add_property(PropertyNames.RESTRICTION_ENCODING,
self._get_cloud_encoding(restriction_coder))
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (transform.fn, transform.args, transform.kwargs, si_tags_and_types,
transform_node.inputs[0].windowing)
def apply_CombineValues(self, transform, pcoll, options):
return pvalue.PCollection.from_(pcoll)
def run_CombineValues(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.COMBINE, transform_node.full_label, transform_node)
transform_id = self.proto_context.transforms.get_id(transform_node.parent)
# The data transmitted in SERIALIZED_FN is different depending on whether
# this is a fnapi pipeline or not.
from apache_beam.runners.dataflow.internal import apiclient
use_fnapi = apiclient._use_fnapi(options)
if use_fnapi:
# Fnapi pipelines send the transform ID of the CombineValues transform's
# parent composite because Dataflow expects the ID of a CombinePerKey
# transform.
serialized_data = transform_id
else:
# Combiner functions do not take deferred side-inputs (i.e. PValues) and
# therefore the code to handle extra args/kwargs is simpler than for the
# DoFn's of the ParDo transform. The last, empty argument is where
# side input information would go.
serialized_data = pickler.dumps((transform.fn, transform.args,
transform.kwargs, ()))
step.add_property(PropertyNames.SERIALIZED_FN, serialized_data)
# TODO(BEAM-8882): Enable once dataflow service doesn't reject this.
# step.add_property(PropertyNames.PIPELINE_PROTO_TRANSFORM_ID, transform_id)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Note that the accumulator must not have a WindowedValue encoding, while
# the output of this step does in fact have a WindowedValue encoding.
accumulator_encoding = self._get_cloud_encoding(
transform_node.transform.fn.get_accumulator_coder())
output_encoding = self._get_encoded_output_coder(transform_node)
step.encoding = output_encoding
step.add_property(PropertyNames.ENCODING, accumulator_encoding)
# Generate description for main output 'out.'
outputs = []
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def apply_Read(self, transform, pbegin, options):
if hasattr(transform.source, 'format'):
# Consider native Read to be a primitive for dataflow.
return beam.pvalue.PCollection.from_(pbegin)
else:
return self.apply_PTransform(transform, pbegin, options)
def run_Read(self, transform_node, options):
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the source specific properties.
standard_options = options.view_as(StandardOptions)
if not hasattr(transform.source, 'format'):
# If a format is not set, we assume the source to be a custom source.
source_dict = {}
source_dict['spec'] = {
'@type': names.SOURCE_TYPE,
names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
}
try:
source_dict['metadata'] = {
'estimated_size_bytes': json_value.get_typed_value_descriptor(
transform.source.estimate_size())
}
except error.RuntimeValueProviderError:
# Size estimation is best effort; this error means the size depends on a
# runtime value provider that is not yet resolved.
_LOGGER.info(
'Could not estimate size of source %r due to '
'RuntimeValueProviderError', transform.source)
except Exception: # pylint: disable=broad-except
# Size estimation is best effort. So we log the error and continue.
_LOGGER.info(
'Could not estimate size of source %r due to an exception: %s',
transform.source, traceback.format_exc())
step.add_property(PropertyNames.SOURCE_STEP_INPUT,
source_dict)
elif transform.source.format == 'text':
step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
elif transform.source.format == 'bigquery':
if standard_options.streaming:
raise ValueError('BigQuery source is not currently available for use '
'in streaming pipelines.')
step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
# TODO(silviuc): Add table validation if transform.source.validate.
if transform.source.table_reference is not None:
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.source.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.source.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.source.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.source.table_reference.projectId)
elif transform.source.query is not None:
step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
step.add_property(PropertyNames.BIGQUERY_USE_LEGACY_SQL,
transform.source.use_legacy_sql)
step.add_property(PropertyNames.BIGQUERY_FLATTEN_RESULTS,
transform.source.flatten_results)
else:
raise ValueError('BigQuery source %r must specify either a table or'
' a query' % transform.source)
if transform.source.kms_key is not None:
step.add_property(
PropertyNames.BIGQUERY_KMS_KEY, transform.source.kms_key)
elif transform.source.format == 'pubsub':
if not standard_options.streaming:
raise ValueError('Cloud Pub/Sub is currently available for use '
'only in streaming pipelines.')
# Only one of topic or subscription should be set.
if transform.source.full_subscription:
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION,
transform.source.full_subscription)
elif transform.source.full_topic:
step.add_property(PropertyNames.PUBSUB_TOPIC,
transform.source.full_topic)
if transform.source.id_label:
step.add_property(PropertyNames.PUBSUB_ID_LABEL,
transform.source.id_label)
if transform.source.with_attributes:
# Setting this property signals Dataflow runner to return full
# PubsubMessages instead of just the data part of the payload.
step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
if transform.source.timestamp_attribute is not None:
step.add_property(PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
transform.source.timestamp_attribute)
else:
raise ValueError(
'Source %r has unexpected format %s.' % (
transform.source, transform.source.format))
if not hasattr(transform.source, 'format'):
step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
else:
step.add_property(PropertyNames.FORMAT, transform.source.format)
# Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
# step should be the type of value outputted by each step. Read steps
# automatically wrap output values in a WindowedValue wrapper, if necessary.
# This is also necessary for proper encoding for size estimation.
# Using a GlobalWindowCoder as a placeholder instead of the default
# PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(
coders.registry.get_coder(transform_node.outputs[None].element_type),
coders.coders.GlobalWindowCoder())
from apache_beam.runners.dataflow.internal import apiclient
step.encoding = self._get_cloud_encoding(coder)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run__NativeWrite(self, transform_node, options):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.WRITE, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the sink specific properties.
if transform.sink.format == 'text':
# Note that it is important to use typed properties (@type/value dicts)
# for non-string properties and also for empty strings. For example,
# in the code below the num_shards must have type and also
# file_name_suffix and shard_name_template (could be empty strings).
step.add_property(
PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix,
with_type=True)
step.add_property(
PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix,
with_type=True)
step.add_property(
PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template,
with_type=True)
if transform.sink.num_shards > 0:
step.add_property(
PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
# TODO(silviuc): Implement sink validation.
step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
elif transform.sink.format == 'bigquery':
# TODO(silviuc): Add table validation if transform.sink.validate.
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.sink.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.sink.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.sink.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.sink.table_reference.projectId)
step.add_property(PropertyNames.BIGQUERY_CREATE_DISPOSITION,
transform.sink.create_disposition)
step.add_property(PropertyNames.BIGQUERY_WRITE_DISPOSITION,
transform.sink.write_disposition)
if transform.sink.table_schema is not None:
step.add_property(
PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
if transform.sink.kms_key is not None:
step.add_property(
PropertyNames.BIGQUERY_KMS_KEY, transform.sink.kms_key)
elif transform.sink.format == 'pubsub':
standard_options = options.view_as(StandardOptions)
if not standard_options.streaming:
raise ValueError('Cloud Pub/Sub is currently available for use '
'only in streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
if transform.sink.id_label:
step.add_property(PropertyNames.PUBSUB_ID_LABEL,
transform.sink.id_label)
if transform.sink.with_attributes:
# Setting this property signals Dataflow runner that the PCollection
# contains PubsubMessage objects instead of just raw data.
step.add_property(PropertyNames.PUBSUB_SERIALIZED_ATTRIBUTES_FN, '')
if transform.sink.timestamp_attribute is not None:
step.add_property(PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE,
transform.sink.timestamp_attribute)
else:
raise ValueError(
'Sink %r has unexpected format %s.' % (
transform.sink, transform.sink.format))
step.add_property(PropertyNames.FORMAT, transform.sink.format)
# Wrap coder in WindowedValueCoder: this is necessary for proper encoding
    # for size estimation. Using a GlobalWindowCoder as a placeholder instead
    # of the default PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(transform.sink.coder,
coders.coders.GlobalWindowCoder())
from apache_beam.runners.dataflow.internal import apiclient
step.encoding = self._get_cloud_encoding(coder)
step.add_property(PropertyNames.ENCODING, step.encoding)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
def run_TestStream(self, transform_node, options):
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.testing.test_stream import ElementEvent
from apache_beam.testing.test_stream import ProcessingTimeEvent
from apache_beam.testing.test_stream import WatermarkEvent
standard_options = options.view_as(StandardOptions)
if not standard_options.streaming:
raise ValueError('TestStream is currently available for use '
'only in streaming pipelines.')
transform = transform_node.transform
step = self._add_step(TransformNames.READ, transform_node.full_label,
transform_node)
step.add_property(PropertyNames.FORMAT, 'test_stream')
test_stream_payload = beam_runner_api_pb2.TestStreamPayload()
# TestStream source doesn't do any decoding of elements,
# so we won't set test_stream_payload.coder_id.
output_coder = transform._infer_output_coder() # pylint: disable=protected-access
for event in transform.events:
new_event = test_stream_payload.events.add()
if isinstance(event, ElementEvent):
for tv in event.timestamped_values:
element = new_event.element_event.elements.add()
element.encoded_element = output_coder.encode(tv.value)
element.timestamp = tv.timestamp.micros
elif isinstance(event, ProcessingTimeEvent):
new_event.processing_time_event.advance_duration = (
event.advance_by.micros)
elif isinstance(event, WatermarkEvent):
new_event.watermark_event.new_watermark = event.new_watermark.micros
serialized_payload = self.byte_array_to_json_string(
test_stream_payload.SerializeToString())
step.add_property(PropertyNames.SERIALIZED_TEST_STREAM, serialized_payload)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(PropertyNames.OUTPUT_INFO, [{
PropertyNames.USER_NAME:
('%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT
}])
  # We must mark this method as not a test, or else its name matches nose's
  # test-collection pattern and it gets picked up as a test.
run_TestStream.__test__ = False
@classmethod
def serialize_windowing_strategy(cls, windowing):
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
context = pipeline_context.PipelineContext()
windowing_proto = windowing.to_runner_api(context)
return cls.byte_array_to_json_string(
beam_runner_api_pb2.MessageWithComponents(
components=context.to_runner_api(),
windowing_strategy=windowing_proto).SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms.core import Windowing
proto = beam_runner_api_pb2.MessageWithComponents()
proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
return Windowing.from_runner_api(
proto.windowing_strategy,
pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return unquote_to_bytes(encoded_string)
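# Illustrative sketch (not part of the original runner): a tiny, self-contained
# demonstration of the percent-encoding round trip that
# byte_array_to_json_string/json_string_to_byte_array above rely on. It assumes
# quote/unquote_to_bytes are the urllib.parse helpers named in the docstrings;
# the payload value is made up purely for illustration.
def _demo_byte_array_json_round_trip():
  from urllib.parse import quote, unquote_to_bytes
  payload = b'\x00\x01abc'
  # quote() yields a JSON-safe ASCII string such as '%00%01abc'.
  encoded = quote(payload)
  # unquote_to_bytes() restores the original byte string exactly.
  assert unquote_to_bytes(encoded) == payload
  return encoded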
class _DataflowSideInput(beam.pvalue.AsSideInput):
"""Wraps a side input as a dataflow-compatible side input."""
def _view_options(self):
return {
'data': self._data,
}
def _side_input_data(self):
return self._data
class _DataflowIterableSideInput(_DataflowSideInput):
"""Wraps an iterable side input as dataflow-compatible side input."""
def __init__(self, iterable_side_input):
# pylint: disable=protected-access
side_input_data = iterable_side_input._side_input_data()
assert (
side_input_data.access_pattern == common_urns.side_inputs.ITERABLE.urn)
    iterable_view_fn = side_input_data.view_fn
    # Dataflow accesses iterable side inputs through the multimap pattern, so
    # adapt the iterable view_fn to read the values stored under the empty key.
    self._data = beam.pvalue.SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        side_input_data.window_mapping_fn,
        lambda multimap: iterable_view_fn(multimap[b'']))
class _DataflowMultimapSideInput(_DataflowSideInput):
"""Wraps a multimap side input as dataflow-compatible side input."""
def __init__(self, side_input):
# pylint: disable=protected-access
self.pvalue = side_input.pvalue
side_input_data = side_input._side_input_data()
assert (
side_input_data.access_pattern == common_urns.side_inputs.MULTIMAP.urn)
self._data = beam.pvalue.SideInputData(
common_urns.side_inputs.MULTIMAP.urn,
side_input_data.window_mapping_fn,
side_input_data.view_fn)
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
"""Initialize a new DataflowPipelineResult instance.
Args:
job: Job message from the Dataflow API. Could be :data:`None` if a job
        request was not sent to the Dataflow service (e.g. template jobs).
runner: DataflowRunner instance.
"""
self._job = job
self._runner = runner
self.metric_results = None
def _update_job(self):
# We need the job id to be able to update job information. There is no need
# to update the job if we are in a known terminal state.
if self.has_job and not self.is_in_terminal_state():
self._job = self._runner.dataflow_client.get_job(self.job_id())
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
def _get_job_state(self):
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
# Ordered by the enum values. Values that may be introduced in
# future versions of Dataflow API are considered UNRECOGNIZED by the SDK.
api_jobstate_map = defaultdict(lambda: PipelineState.UNRECOGNIZED, {
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
})
return (api_jobstate_map[self._job.currentState] if self._job.currentState
else PipelineState.UNKNOWN)
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
self._update_job()
return self._get_job_state()
def is_in_terminal_state(self):
if not self.has_job:
return True
return PipelineState.is_terminal(self._get_job_state())
def wait_until_finish(self, duration=None):
if not self.is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self, duration))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
      while thread.is_alive():
time.sleep(5.0)
# TODO: Merge the termination code in poll_for_job_completion and
# is_in_terminal_state.
terminated = self.is_in_terminal_state()
assert duration or terminated, (
        'Job did not reach a terminal state after waiting indefinitely.')
if terminated and self.state != PipelineState.DONE:
# TODO(BEAM-1290): Consider converting this to an error log based on
      # the resolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
return self.state
def cancel(self):
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
self._update_job()
if self.is_in_terminal_state():
_LOGGER.warning(
'Cancel failed because job %s is already terminated in state %s.',
self.job_id(), self.state)
else:
if not self._runner.dataflow_client.modify_job_state(
self.job_id(), 'JOB_STATE_CANCELLED'):
cancel_failed_message = (
'Failed to cancel job %s, please go to the Developers Console to '
'cancel it manually.') % self.job_id()
_LOGGER.error(cancel_failed_message)
raise DataflowRuntimeException(cancel_failed_message, self)
return self.state
def __str__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
self.job_id(),
self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
"""Indicates an error has occurred in running this pipeline."""
def __init__(self, msg, result):
super(DataflowRuntimeException, self).__init__(msg)
self.result = result
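# Illustrative sketch (not part of the original module): round-trips a minimal
# windowing strategy through DataflowRunner.serialize_windowing_strategy and
# deserialize_windowing_strategy defined above. Using GlobalWindows here is an
# assumption made only to keep the example small.
def _demo_windowing_strategy_round_trip():
  from apache_beam.transforms.core import Windowing
  from apache_beam.transforms.window import GlobalWindows
  serialized = DataflowRunner.serialize_windowing_strategy(
      Windowing(GlobalWindows()))
  # Deserializing should reconstruct an equivalent Windowing object.
  return DataflowRunner.deserialize_windowing_strategy(serialized)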
|
RyanSkraba/beam
|
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
|
Python
|
apache-2.0
| 65,693
|
[
"VisIt"
] |
31a26c8603f2e673d162d61664b62ad0b99aca7ea8e663e34420417bc77258a6
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NeuronArticleMap'
db.create_table('neuroelectro_neuronarticlemap', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('neuron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Neuron'])),
('neuron_syn', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.NeuronSyn'], null=True)),
('num_mentions', self.gf('django.db.models.fields.IntegerField')(null=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Article'], null=True)),
('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('added_by', self.gf('django.db.models.fields.CharField')(default='robot', max_length=20)),
))
db.send_create_signal('neuroelectro', ['NeuronArticleMap'])
def backwards(self, orm):
# Deleting model 'NeuronArticleMap'
db.delete_table('neuroelectro_neuronarticlemap')
models = {
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'ephys_props': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysProp']", 'null': 'True', 'through': "orm['neuroelectro.EphysConceptMap']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.datatabletag': {
'Meta': {'object_name': 'DataTableTag'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"})
},
'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'ephys_prop_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysPropSyn']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'defining_articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Article']", 'null': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.neuronephyslink': {
'Meta': {'object_name': 'NeuronEphysLink'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']"}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_reps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_err': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.superprotein': {
'Meta': {'object_name': 'SuperProtein'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['neuroelectro']
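# Usage note (not part of the generated migration): with South installed, this
# migration is typically applied and reverted from the project root with the
# standard South commands, e.g.
#   python manage.py migrate neuroelectro          # runs forwards()
#   python manage.py migrate neuroelectro 0020     # rolls back via backwards()
# The target 0020 is only an assumption inferred from this file's 0021 prefix;
# substitute the app's actual previous migration.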
|
lessc0de/neuroelectro_org
|
neuroelectro/south_migrations/0021_auto__add_neuronarticlemap.py
|
Python
|
gpl-2.0
| 15,155
|
[
"NEURON"
] |
55db47f9655cea72fe2feece7346a206091586ee8d7ea2527bf71217c2bfaf75
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
class cannyEdgeDetection(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._config.variance = (0.7, 0.7, 0.7)
self._config.maximum_error = (0.01, 0.01, 0.01)
self._config.upper_threshold = 0.0
self._config.lower_threshold = 0.0
self._config.outside_value = 0.0
configList = [
('Variance:', 'variance', 'tuple:float,3', 'text',
'Variance of Gaussian used for smoothing the input image (units: '
'true spacing).'),
('Maximum error:', 'maximum_error', 'tuple:float,3', 'text',
'The discrete Gaussian kernel will be sized so that the '
'truncation error is smaller than this.'),
            ('Upper threshold:', 'upper_threshold', 'base:float', 'text',
             'Upper hysteresis threshold applied to the gradient magnitude.'),
            ('Lower threshold:', 'lower_threshold', 'base:float', 'text',
             'Lower hysteresis threshold applied to the gradient magnitude.'),
('Outside value:', 'outside_value', 'base:float', 'text',
'Pixels lower than threshold will be set to this.')]
# setup the pipeline
if3 = itk.Image[itk.F, 3]
self._canny = itk.CannyEdgeDetectionImageFilter[if3, if3].New()
itk_kit.utils.setupITKObjectProgress(
self, self._canny, 'itkCannyEdgeDetectionImageFilter',
'Performing Canny edge detection')
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
# and the baseclass close
ModuleBase.close(self)
# remove all bindings
del self._canny
def execute_module(self):
# create a new canny filter
if3 = itk.Image.F3
c = itk.CannyEdgeDetectionImageFilter[if3, if3].New()
c.SetInput(self._canny.GetInput())
# disconnect the old one
self._canny.SetInput(None)
# replace it with the new one
self._canny = c
# setup new progress handler
itk_kit.utils.setupITKObjectProgress(
self, self._canny, 'itkCannyEdgeDetectionImageFilter',
'Performing Canny edge detection')
# apply our config
self.sync_module_logic_with_config()
# and go!
self._canny.Update()
def get_input_descriptions(self):
return ('ITK Image (3D, float)',)
def set_input(self, idx, inputStream):
self._canny.SetInput(inputStream)
def get_output_descriptions(self):
return ('ITK Edge Image (3D, float)',)
def get_output(self, idx):
return self._canny.GetOutput()
def config_to_logic(self):
# thanks to WrapITK, we can now set / get tuples / lists!
# VARIANCE
self._canny.SetVariance(self._config.variance)
# MAXIMUM ERROR
self._canny.SetMaximumError(self._config.maximum_error)
# THRESHOLD
self._canny.SetUpperThreshold(self._config.upper_threshold)
self._canny.SetLowerThreshold(self._config.lower_threshold)
# OUTSIDE VALUE
self._canny.SetOutsideValue(self._config.outside_value)
def logic_to_config(self):
# VARIANCE
self._config.variance = tuple(self._canny.GetVariance())
# MAXIMUM ERROR
self._config.maximum_error = \
tuple(self._canny.GetMaximumError())
# THRESHOLDS
self._config.upper_threshold = self._canny.GetUpperThreshold()
self._config.lower_threshold = self._canny.GetLowerThreshold()
# OUTSIDE VALUE
self._config.outside_value = self._canny.GetOutsideValue()
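# Illustrative sketch (not part of the DeVIDE module): roughly how the same
# WrapITK Canny filter could be driven outside DeVIDE, mirroring what
# config_to_logic() applies to self._canny. The file name and parameter values
# below are made-up assumptions for illustration only.
def _example_standalone_canny(filename):
    import itk
    image_type = itk.Image[itk.F, 3]
    reader = itk.ImageFileReader[image_type].New()
    reader.SetFileName(filename)
    canny = itk.CannyEdgeDetectionImageFilter[image_type, image_type].New()
    canny.SetInput(reader.GetOutput())
    canny.SetVariance((0.7, 0.7, 0.7))         # smoothing, in true spacing units
    canny.SetMaximumError((0.01, 0.01, 0.01))  # Gaussian kernel truncation bound
    canny.SetUpperThreshold(0.0)
    canny.SetLowerThreshold(0.0)
    canny.Update()
    return canny.GetOutput()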
|
nagyistoce/devide
|
modules/insight/cannyEdgeDetection.py
|
Python
|
bsd-3-clause
| 4,373
|
[
"Gaussian"
] |
9bf6e46a3d55c96be4607fd7616b281a43198bb5b8b0f7df02287315fbc3b170
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime as dt
import simplejson as json
import decimal
import random
from collections import namedtuple, OrderedDict
import pytest
from marshmallow import (
Schema, fields, utils, MarshalResult, UnmarshalResult,
validates, validates_schema
)
from marshmallow.exceptions import ValidationError
from tests.base import * # noqa
random.seed(1)
# Run tests with both verbose serializer and "meta" option serializer
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serializing_basic_object(SchemaClass, user):
s = SchemaClass()
data, errors = s.dump(user)
assert data['name'] == user.name
assert_almost_equal(data['age'], 42.3)
assert data['registered']
def test_serializer_dump(user):
s = UserSchema()
result, errors = s.dump(user)
assert result['name'] == user.name
# Change strict mode
s.strict = True
bad_user = User(name='Monty', age='badage')
with pytest.raises(ValidationError):
s.dump(bad_user)
def test_dump_returns_dict_of_errors():
s = UserSchema()
bad_user = User(name='Monty', age='badage')
result, errors = s.dump(bad_user)
assert 'age' in errors
@pytest.mark.parametrize('SchemaClass',
[
UserSchema, UserMetaSchema
])
def test_dump_with_strict_mode_raises_error(SchemaClass):
s = SchemaClass(strict=True)
bad_user = User(name='Monty', homepage='http://www.foo.bar', email='invalid-email')
with pytest.raises(ValidationError) as excinfo:
s.dump(bad_user)
exc = excinfo.value
assert type(exc.fields[0]) == fields.Email
assert exc.field_names[0] == 'email'
assert type(exc.messages) == dict
assert exc.messages == {'email': ['Not a valid email address.']}
def test_dump_resets_errors():
class MySchema(Schema):
email = fields.Email()
schema = MySchema()
result = schema.dump(User('Joe', email='notvalid'))
assert len(result.errors['email']) == 1
assert 'Not a valid email address.' in result.errors['email'][0]
result = schema.dump(User('Steve', email='__invalid'))
assert len(result.errors['email']) == 1
assert 'Not a valid email address.' in result.errors['email'][0]
def test_load_resets_errors():
class MySchema(Schema):
email = fields.Email()
schema = MySchema()
result = schema.load({'name': 'Joe', 'email': 'notvalid'})
assert len(result.errors['email']) == 1
assert 'Not a valid email address.' in result.errors['email'][0]
result = schema.load({'name': 'Joe', 'email': '__invalid'})
assert len(result.errors['email']) == 1
assert 'Not a valid email address.' in result.errors['email'][0]
def test_strict_load_validation_error_stores_input_data_and_valid_data():
class MySchema(Schema):
always_valid = fields.DateTime()
always_invalid = fields.Field(validate=[lambda v: False])
class Meta:
strict = True
schema = MySchema()
input_data = {'always_valid': dt.datetime.utcnow().isoformat(), 'always_invalid': 24}
try:
schema.load(input_data)
except ValidationError as err:
# err.data is the raw input data
assert err.data == input_data
assert 'always_valid' in err.valid_data
# err.valid_data contains valid, deserialized data
assert isinstance(err.valid_data['always_valid'], dt.datetime)
# excludes invalid data
assert 'always_invalid' not in err.valid_data
else:
pytest.fail('Data is invalid. Expected a ValidationError to be raised.')
def test_strict_dump_validation_error_stores_partially_valid_data():
class FailOnDump(fields.Field):
def _serialize(self, *args, **kwargs):
raise ValidationError('fail')
class MySchema(Schema):
always_valid = fields.DateTime()
always_invalid = FailOnDump()
class Meta:
strict = True
schema = MySchema()
input_data = {'always_valid': dt.datetime.utcnow(), 'always_invalid': 24}
try:
schema.dump(input_data)
except ValidationError as err:
# err.data is the raw input data
assert err.data == input_data
assert 'always_valid' in err.valid_data
# err.valid_data contains valid, serialized data
assert isinstance(err.valid_data['always_valid'], str)
# excludes invalid data
assert 'always_invalid' not in err.valid_data
else:
pytest.fail('Data is invalid. Expected a ValidationError to be raised.')
def test_dump_resets_error_fields():
class MySchema(Schema):
email = fields.Email()
schema = MySchema(strict=True)
with pytest.raises(ValidationError) as excinfo:
schema.dump(User('Joe', email='notvalid'))
exc = excinfo.value
assert len(exc.fields) == 1
assert len(exc.field_names) == 1
with pytest.raises(ValidationError) as excinfo:
schema.dump(User('Joe', email='__invalid'))
assert len(exc.fields) == 1
assert len(exc.field_names) == 1
def test_load_resets_error_fields():
class MySchema(Schema):
email = fields.Email()
schema = MySchema(strict=True)
with pytest.raises(ValidationError) as excinfo:
schema.load({'name': 'Joe', 'email': 'not-valid'})
exc = excinfo.value
assert len(exc.fields) == 1
assert len(exc.field_names) == 1
with pytest.raises(ValidationError) as excinfo:
schema.load({'name': 'Joe', 'email': '__invalid'})
assert len(exc.fields) == 1
assert len(exc.field_names) == 1
def test_load_resets_error_kwargs():
class MySchema(Schema):
name = fields.String()
@validates_schema
def validate_all(self, data):
if data:
raise ValidationError('oops', custom_error_kwarg=data)
else:
raise ValidationError('oops')
schema = MySchema(strict=True)
with pytest.raises(ValidationError) as excinfo:
schema.load({'name': 'Joe'})
exc = excinfo.value
assert exc.kwargs['custom_error_kwarg'] == {'name': 'Joe'}
with pytest.raises(ValidationError) as excinfo:
schema.load({})
exc = excinfo.value
assert 'custom_error_kwarg' not in exc.kwargs
def test_errored_fields_do_not_appear_in_output():
class MyField(fields.Field):
# Make sure validation fails during serialization
def _serialize(self, val, attr, obj):
raise ValidationError('oops')
class MySchema(Schema):
foo = MyField(validate=lambda x: False)
sch = MySchema()
data, errors = sch.load({'foo': 2})
assert 'foo' in errors
assert 'foo' not in data
data, errors = sch.dump({'foo': 2})
assert 'foo' in errors
assert 'foo' not in data
def test_load_many_stores_error_indices():
s = UserSchema()
data = [
{'name': 'Mick', 'email': 'mick@stones.com'},
{'name': 'Keith', 'email': 'invalid-email', 'homepage': 'invalid-homepage'},
]
_, errors = s.load(data, many=True)
assert 0 not in errors
assert 1 in errors
assert 'email' in errors[1]
assert 'homepage' in errors[1]
def test_dump_many():
s = UserSchema()
u1, u2 = User('Mick'), User('Keith')
data, errors = s.dump([u1, u2], many=True)
assert len(data) == 2
assert data[0] == s.dump(u1).data
def test_multiple_errors_can_be_stored_for_a_given_index():
class MySchema(Schema):
foo = fields.Str(validate=lambda x: len(x) > 3)
bar = fields.Int(validate=lambda x: x > 3)
sch = MySchema()
valid = {'foo': 'loll', 'bar': 42}
invalid = {'foo': 'lol', 'bar': 3}
errors = sch.validate([valid, invalid], many=True)
assert 1 in errors
assert len(errors[1]) == 2
assert 'foo' in errors[1]
assert 'bar' in errors[1]
def test_dump_many_stores_error_indices():
s = UserSchema()
u1, u2 = User('Mick', email='mick@stones.com'), User('Keith', email='invalid')
_, errors = s.dump([u1, u2], many=True)
assert 1 in errors
assert len(errors[1]) == 1
assert 'email' in errors[1]
def test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false():
class NoIndex(Schema):
email = fields.Email()
class Meta:
index_errors = False
s = NoIndex()
u1, u2 = User('Mick', email='mick@stones.com'), User('Keith', email='invalid')
_, errors = s.dump([u1, u2], many=True)
assert 1 not in errors
assert 'email' in errors
def test_dump_returns_a_marshalresult(user):
s = UserSchema()
result = s.dump(user)
assert type(result) == MarshalResult
data = result.data
assert type(data) == dict
errors = result.errors
assert type(errors) == dict
def test_dumps_returns_a_marshalresult(user):
s = UserSchema()
result = s.dumps(user)
assert type(result) == MarshalResult
assert type(result.data) == str
assert type(result.errors) == dict
def test_dumping_single_object_with_collection_schema(user):
s = UserSchema(many=True)
result = s.dump(user, many=False)
assert type(result.data) == dict
assert result.data == UserSchema().dump(user).data
def test_loading_single_object_with_collection_schema():
s = UserSchema(many=True)
in_data = {'name': 'Mick', 'email': 'mick@stones.com'}
result = s.load(in_data, many=False)
assert type(result.data) == User
assert result.data.name == UserSchema().load(in_data).data.name
def test_dumps_many():
s = UserSchema()
u1, u2 = User('Mick'), User('Keith')
json_result = s.dumps([u1, u2], many=True)
data = json.loads(json_result.data)
assert len(data) == 2
assert data[0] == s.dump(u1).data
def test_load_returns_an_unmarshalresult():
s = UserSchema()
result = s.load({'name': 'Monty'})
assert type(result) == UnmarshalResult
assert type(result.data) == User
assert type(result.errors) == dict
def test_load_many():
s = UserSchema()
in_data = [{'name': 'Mick'}, {'name': 'Keith'}]
result = s.load(in_data, many=True)
assert type(result.data) == list
assert type(result.data[0]) == User
assert result.data[0].name == 'Mick'
def test_loads_returns_an_unmarshalresult(user):
s = UserSchema()
result = s.loads(json.dumps({'name': 'Monty'}))
assert type(result) == UnmarshalResult
assert type(result.data) == User
assert type(result.errors) == dict
def test_loads_many():
s = UserSchema()
in_data = [{'name': 'Mick'}, {'name': 'Keith'}]
in_json_data = json.dumps(in_data)
result = s.loads(in_json_data, many=True)
assert type(result.data) == list
assert result.data[0].name == 'Mick'
def test_loads_deserializes_from_json():
user_dict = {'name': 'Monty', 'age': '42.3'}
user_json = json.dumps(user_dict)
result, errors = UserSchema().loads(user_json)
assert isinstance(result, User)
assert result.name == 'Monty'
assert_almost_equal(result.age, 42.3)
def test_serializing_none():
class MySchema(Schema):
id = fields.Str(default='no-id')
num = fields.Int()
name = fields.Str()
    s = MySchema().dump(None)
assert s.data == {'id': 'no-id'}
assert s.errors == {}
def test_default_many_symmetry():
"""The dump/load(s) methods should all default to the many value of the schema."""
s_many = UserSchema(many=True, only=('name',))
s_single = UserSchema(only=('name',))
u1, u2 = User('King Arthur'), User('Sir Lancelot')
s_single.load(s_single.dump(u1).data)
s_single.loads(s_single.dumps(u1).data)
s_many.load(s_many.dump([u1, u2]).data)
s_many.loads(s_many.dumps([u1, u2]).data)
def test_on_bind_field_hook():
class MySchema(Schema):
foo = fields.Str()
def on_bind_field(self, field_name, field_obj):
assert field_obj.parent is self
field_obj.metadata['fname'] = field_name
schema = MySchema()
assert schema.fields['foo'].metadata['fname'] == 'foo'
def test_nested_on_bind_field_hook():
class MySchema(Schema):
class NestedSchema(Schema):
bar = fields.Str()
def on_bind_field(self, field_name, field_obj):
field_obj.metadata['fname'] = self.context['fname']
foo = fields.Nested(NestedSchema)
schema1 = MySchema(context={'fname': 'foobar'})
schema2 = MySchema(context={'fname': 'quxquux'})
assert schema1.fields['foo'].schema.fields['bar'].metadata['fname'] == 'foobar'
assert schema2.fields['foo'].schema.fields['bar'].metadata['fname'] == 'quxquux'
class TestValidate:
def test_validate_returns_errors_dict(self):
s = UserSchema()
errors = s.validate({'email': 'bad-email', 'name': 'Valid Name'})
assert type(errors) is dict
assert 'email' in errors
assert 'name' not in errors
valid_data = {'name': 'Valid Name', 'email': 'valid@email.com'}
errors = s.validate(valid_data)
assert errors == {}
def test_validate_many(self):
s = UserSchema(many=True)
in_data = [
{'name': 'Valid Name', 'email': 'validemail@hotmail.com'},
{'name': 'Valid Name2', 'email': 'invalid'}
]
errors = s.validate(in_data, many=True)
assert 1 in errors
assert 'email' in errors[1]
def test_validate_many_doesnt_store_index_if_index_errors_option_is_false(self):
class NoIndex(Schema):
email = fields.Email()
class Meta:
index_errors = False
s = NoIndex()
in_data = [
{'name': 'Valid Name', 'email': 'validemail@hotmail.com'},
{'name': 'Valid Name2', 'email': 'invalid'}
]
errors = s.validate(in_data, many=True)
assert 1 not in errors
assert 'email' in errors
def test_validate_strict(self):
s = UserSchema(strict=True)
with pytest.raises(ValidationError) as excinfo:
s.validate({'email': 'bad-email'})
exc = excinfo.value
assert exc.messages == {'email': ['Not a valid email address.']}
assert type(exc.fields[0]) == fields.Email
def test_validate_required(self):
class MySchema(Schema):
foo = fields.Field(required=True)
s = MySchema()
errors = s.validate({'bar': 42})
assert 'foo' in errors
assert 'required' in errors['foo'][0]
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_fields_are_not_copies(SchemaClass):
s = SchemaClass()
s2 = SchemaClass()
assert s.fields is not s2.fields
def test_dumps_returns_json(user):
ser = UserSchema()
serialized, errors = ser.dump(user)
json_data, errors = ser.dumps(user)
assert type(json_data) == str
expected = json.dumps(serialized)
assert json_data == expected
def test_naive_datetime_field(user, serialized_user):
expected = utils.isoformat(user.created)
assert serialized_user.data['created'] == expected
def test_datetime_formatted_field(user, serialized_user):
result = serialized_user.data['created_formatted']
assert result == user.created.strftime("%Y-%m-%d")
def test_datetime_iso_field(user, serialized_user):
assert serialized_user.data['created_iso'] == utils.isoformat(user.created)
def test_tz_datetime_field(user, serialized_user):
# Datetime is corrected back to GMT
expected = utils.isoformat(user.updated)
assert serialized_user.data['updated'] == expected
def test_local_datetime_field(user, serialized_user):
expected = utils.isoformat(user.updated, localtime=True)
assert serialized_user.data['updated_local'] == expected
def test_class_variable(serialized_user):
assert serialized_user.data['species'] == 'Homo sapiens'
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serialize_many(SchemaClass):
user1 = User(name="Mick", age=123)
user2 = User(name="Keith", age=456)
users = [user1, user2]
serialized = SchemaClass(many=True).dump(users)
assert len(serialized.data) == 2
assert serialized.data[0]['name'] == "Mick"
assert serialized.data[1]['name'] == "Keith"
def test_inheriting_schema(user):
sch = ExtendedUserSchema()
result = sch.dump(user)
assert result.data['name'] == user.name
user.is_old = True
result = sch.dump(user)
assert result.data['is_old'] is True
def test_custom_field(serialized_user, user):
assert serialized_user.data['uppername'] == user.name.upper()
def test_url_field(serialized_user, user):
assert serialized_user.data['homepage'] == user.homepage
def test_relative_url_field():
u = {'name': 'John', 'homepage': '/foo'}
result, errors = UserRelativeUrlSchema().load(u)
assert 'homepage' not in errors
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_stores_invalid_url_error(SchemaClass):
user = {'name': 'Steve', 'homepage': 'www.foo.com'}
result = SchemaClass().load(user)
assert "homepage" in result.errors
expected = ['Not a valid URL.']
assert result.errors['homepage'] == expected
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_email_field(SchemaClass):
u = User("John", email="john@example.com")
s = SchemaClass().dump(u)
assert s.data['email'] == "john@example.com"
def test_stored_invalid_email():
u = {'name': 'John', 'email': 'johnexample.com'}
s = UserSchema().load(u)
assert "email" in s.errors
assert s.errors['email'][0] == 'Not a valid email address.'
def test_integer_field():
u = User("John", age=42.3)
serialized = UserIntSchema().dump(u)
assert type(serialized.data['age']) == int
assert serialized.data['age'] == 42
def test_as_string():
u = User("John", age=42.3)
serialized = UserFloatStringSchema().dump(u)
assert type(serialized.data['age']) == str
assert_almost_equal(float(serialized.data['age']), 42.3)
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_method_field(SchemaClass, serialized_user):
assert serialized_user.data['is_old'] is False
u = User("Joe", age=81)
assert SchemaClass().dump(u).data['is_old'] is True
def test_function_field(serialized_user, user):
assert serialized_user.data['lowername'] == user.name.lower()
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_prefix(SchemaClass, user):
s = SchemaClass(prefix="usr_").dump(user)
assert s.data['usr_name'] == user.name
def test_fields_must_be_declared_as_instances(user):
class BadUserSchema(Schema):
name = fields.String
with pytest.raises(TypeError) as excinfo:
BadUserSchema().dump(user)
assert 'must be declared as a Field instance' in str(excinfo)
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serializing_generator(SchemaClass):
users = [User("Foo"), User("Bar")]
user_gen = (u for u in users)
s = SchemaClass(many=True).dump(user_gen)
assert len(s.data) == 2
assert s.data[0] == SchemaClass().dump(users[0]).data
def test_serializing_empty_list_returns_empty_list():
assert UserSchema(many=True).dump([]).data == []
assert UserMetaSchema(many=True).dump([]).data == []
def test_serializing_dict():
user = {"name": "foo", "email": "foo@bar.com", "age": 'badage', "various_data": {"foo": "bar"}}
s = UserSchema().dump(user)
assert s.data['name'] == "foo"
assert 'age' in s.errors
assert 'age' not in s.data
assert s.data['various_data'] == {"foo": "bar"}
def test_serializing_dict_with_meta_fields():
class MySchema(Schema):
class Meta:
fields = ('foo', 'bar')
sch = MySchema()
data, errors = sch.dump({'foo': 42, 'bar': 24, 'baz': 424})
assert not errors
assert data['foo'] == 42
assert data['bar'] == 24
assert 'baz' not in data
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_exclude_in_init(SchemaClass, user):
s = SchemaClass(exclude=('age', 'homepage')).dump(user)
assert 'homepage' not in s.data
assert 'age' not in s.data
assert 'name' in s.data
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_only_in_init(SchemaClass, user):
s = SchemaClass(only=('name', 'age')).dump(user)
assert 'homepage' not in s.data
assert 'name' in s.data
assert 'age' in s.data
def test_invalid_only_param(user):
with pytest.raises(AttributeError):
UserSchema(only=("_invalid", "name")).dump(user)
def test_can_serialize_uuid(serialized_user, user):
assert serialized_user.data['uid'] == str(user.uid)
def test_can_serialize_time(user, serialized_user):
expected = user.time_registered.isoformat()[:15]
assert serialized_user.data['time_registered'] == expected
def test_invalid_time():
u = User('Joe', time_registered='foo')
s = UserSchema().dump(u)
assert '"foo" cannot be formatted as a time.' in s.errors['time_registered']
def test_invalid_date():
u = User("Joe", birthdate='foo')
s = UserSchema().dump(u)
assert '"foo" cannot be formatted as a date.' in s.errors['birthdate']
def test_invalid_email():
u = User('Joe', email='bademail')
s = UserSchema().dump(u)
assert 'email' in s.errors
assert 'Not a valid email address.' in s.errors['email'][0]
def test_invalid_url():
u = User('Joe', homepage='badurl')
s = UserSchema().dump(u)
assert 'homepage' in s.errors
assert 'Not a valid URL.' in s.errors['homepage'][0]
def test_invalid_dict_but_okay():
u = User('Joe', various_data='baddict')
s = UserSchema().dump(u)
assert 'various_data' not in s.errors
def test_json_module_is_deprecated():
with pytest.warns(DeprecationWarning):
class UserJSONSchema(Schema):
name = fields.String()
class Meta:
json_module = mockjson
user = User('Joe')
s = UserJSONSchema()
result, errors = s.dumps(user)
assert result == mockjson.dumps('val')
def test_render_module():
class UserJSONSchema(Schema):
name = fields.String()
class Meta:
render_module = mockjson
user = User('Joe')
s = UserJSONSchema()
result, errors = s.dumps(user)
assert result == mockjson.dumps('val')
def test_custom_error_message():
class ErrorSchema(Schema):
email = fields.Email(error_messages={'invalid': 'Invalid email'})
homepage = fields.Url(error_messages={'invalid': 'Bad homepage.'})
balance = fields.Decimal(error_messages={'invalid': 'Bad balance.'})
u = {'email': 'joe.net', 'homepage': 'joe@example.com', 'balance': 'blah'}
s = ErrorSchema()
data, errors = s.load(u)
assert "Bad balance." in errors['balance']
assert "Bad homepage." in errors['homepage']
assert "Invalid email" in errors['email']
def test_load_errors_with_many():
class ErrorSchema(Schema):
email = fields.Email()
data = [
{'email': 'bademail'},
{'email': 'goo@email.com'},
{'email': 'anotherbademail'},
]
data, errors = ErrorSchema(many=True).load(data)
assert 0 in errors
assert 2 in errors
assert 'Not a valid email address.' in errors[0]['email'][0]
assert 'Not a valid email address.' in errors[2]['email'][0]
def test_error_raised_if_fields_option_is_not_list():
with pytest.raises(ValueError):
class BadSchema(Schema):
name = fields.String()
class Meta:
fields = 'name'
def test_error_raised_if_additional_option_is_not_list():
with pytest.raises(ValueError):
class BadSchema(Schema):
name = fields.String()
class Meta:
additional = 'email'
def test_nested_only():
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(ChildSchema)
sch = ParentSchema(only=('bla', 'blubb.foo', 'blubb.bar'))
data = dict(bla=1, bli=2, blubb=dict(foo=42, bar=24, baz=242))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' not in result.data
child = result.data['blubb']
assert 'foo' in child
assert 'bar' in child
assert 'baz' not in child
def test_nested_exclude():
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(ChildSchema)
sch = ParentSchema(exclude=('bli', 'blubb.baz'))
data = dict(bla=1, bli=2, blubb=dict(foo=42, bar=24, baz=242))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' not in result.data
child = result.data['blubb']
assert 'foo' in child
assert 'bar' in child
assert 'baz' not in child
def test_nested_only_and_exclude_with_dot_notation():
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(ChildSchema)
sch = ParentSchema(only=('bla', 'blubb.foo', 'blubb.bar'), exclude=('blubb.foo',))
data = dict(bla=1, bli=2, blubb=dict(foo=42, bar=24, baz=242))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' not in result.data
child = result.data['blubb']
assert 'foo' not in child
assert 'bar' in child
assert 'baz' not in child
def test_meta_nested_exclude():
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(ChildSchema)
class Meta:
exclude = ('blubb.foo',)
sch = ParentSchema()
data = dict(bla=1, bli=2, blubb=dict(foo=42, bar=24, baz=242))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' in result.data
child = result.data['blubb']
assert 'foo' not in child
assert 'bar' in child
assert 'baz' in child
def test_deeply_nested_only_and_exclude():
class GrandChildSchema(Schema):
goo = fields.Field()
gah = fields.Field()
bah = fields.Field()
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
flubb = fields.Nested(GrandChildSchema)
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(ChildSchema)
sch = ParentSchema(
only=('bla', 'blubb.foo', 'blubb.flubb.goo', 'blubb.flubb.gah'),
exclude=('blubb.flubb.goo',)
)
data = dict(bla=1, bli=2, blubb=dict(foo=3, bar=4, flubb=dict(goo=5, gah=6, bah=7)))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' not in result.data
child = result.data['blubb']
assert 'foo' in child
assert 'flubb' in child
assert 'bar' not in child
grand_child = child['flubb']
assert 'gah' in grand_child
assert 'goo' not in grand_child
assert 'bah' not in grand_child
class TestDeeplyNestedLoadOnly:
@pytest.fixture()
def schema(self):
class GrandChildSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
class ChildSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
grand_child = fields.Nested(GrandChildSchema)
class ParentSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
child = fields.Nested(ChildSchema)
return ParentSchema(
dump_only=('str_dump_only', 'child.str_dump_only', 'child.grand_child.str_dump_only'),
load_only=('str_load_only', 'child.str_load_only', 'child.grand_child.str_load_only'),
)
@pytest.fixture()
def data(self):
return dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String',
child=dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String',
grand_child=dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String',
)
)
)
def test_load_only(self, schema, data):
result = schema.dump(data)
assert not result.errors
assert 'str_load_only' not in result.data
assert 'str_dump_only' in result.data
assert 'str_regular' in result.data
child = result.data['child']
assert 'str_load_only' not in child
assert 'str_dump_only' in child
assert 'str_regular' in child
grand_child = child['grand_child']
assert 'str_load_only' not in grand_child
assert 'str_dump_only' in grand_child
assert 'str_regular' in grand_child
def test_dump_only(self, schema, data):
result = schema.load(data)
assert not result.errors
assert 'str_dump_only' not in result.data
assert 'str_load_only' in result.data
assert 'str_regular' in result.data
child = result.data['child']
assert 'str_dump_only' not in child
assert 'str_load_only' in child
assert 'str_regular' in child
grand_child = child['grand_child']
assert 'str_dump_only' not in grand_child
assert 'str_load_only' in grand_child
assert 'str_regular' in grand_child
class TestDeeplyNestedListLoadOnly:
@pytest.fixture()
def schema(self):
class ChildSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
class ParentSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
child = fields.List(fields.Nested(ChildSchema))
return ParentSchema(
dump_only=('str_dump_only', 'child.str_dump_only'),
load_only=('str_load_only', 'child.str_load_only'),
)
@pytest.fixture()
def data(self):
return dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String',
child=[dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String'
)]
)
def test_load_only(self, schema, data):
result = schema.dump(data)
assert not result.errors
assert 'str_load_only' not in result.data
assert 'str_dump_only' in result.data
assert 'str_regular' in result.data
child = result.data['child'][0]
assert 'str_load_only' not in child
assert 'str_dump_only' in child
assert 'str_regular' in child
def test_dump_only(self, schema, data):
result = schema.load(data)
assert not result.errors
assert 'str_dump_only' not in result.data
assert 'str_load_only' in result.data
assert 'str_regular' in result.data
child = result.data['child'][0]
assert 'str_dump_only' not in child
assert 'str_load_only' in child
assert 'str_regular' in child
def test_nested_constructor_only_and_exclude():
class GrandChildSchema(Schema):
goo = fields.Field()
gah = fields.Field()
bah = fields.Field()
class ChildSchema(Schema):
foo = fields.Field()
bar = fields.Field()
flubb = fields.Nested(GrandChildSchema)
class ParentSchema(Schema):
bla = fields.Field()
bli = fields.Field()
blubb = fields.Nested(
ChildSchema,
only=('foo', 'flubb.goo', 'flubb.gah'),
exclude=('flubb.goo',)
)
sch = ParentSchema(only=('bla', 'blubb'))
data = dict(bla=1, bli=2, blubb=dict(foo=3, bar=4, flubb=dict(goo=5, gah=6, bah=7)))
result = sch.dump(data)
assert 'bla' in result.data
assert 'blubb' in result.data
assert 'bli' not in result.data
child = result.data['blubb']
assert 'foo' in child
assert 'flubb' in child
assert 'bar' not in child
grand_child = child['flubb']
assert 'gah' in grand_child
assert 'goo' not in grand_child
assert 'bah' not in grand_child
def test_only_and_exclude():
class MySchema(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
sch = MySchema(only=('foo', 'bar'), exclude=('bar', ))
data = dict(foo=42, bar=24, baz=242)
result = sch.dump(data)
assert 'foo' in result.data
assert 'bar' not in result.data
def test_exclude_invalid_attribute():
class MySchema(Schema):
foo = fields.Field()
sch = MySchema(exclude=('bar', ))
assert sch.dump({'foo': 42}).data == {'foo': 42}
def test_only_with_invalid_attribute():
class MySchema(Schema):
foo = fields.Field()
sch = MySchema(only=('bar', ))
with pytest.raises(KeyError) as excinfo:
sch.dump(dict(foo=42))
assert '"bar" is not a valid field' in str(excinfo.value.args[0])
def test_only_bounded_by_fields():
class MySchema(Schema):
class Meta:
fields = ('foo', )
sch = MySchema(only=('baz', ))
assert sch.dump({'foo': 42}).data == {}
def test_nested_only_and_exclude():
class Inner(Schema):
foo = fields.Field()
bar = fields.Field()
baz = fields.Field()
class Outer(Schema):
inner = fields.Nested(Inner, only=('foo', 'bar'), exclude=('bar', ))
sch = Outer()
data = dict(inner=dict(foo=42, bar=24, baz=242))
result = sch.dump(data)
assert 'foo' in result.data['inner']
assert 'bar' not in result.data['inner']
def test_nested_with_sets():
class Inner(Schema):
foo = fields.Field()
class Outer(Schema):
inners = fields.Nested(Inner, many=True)
sch = Outer()
DataClass = namedtuple('DataClass', ['foo'])
data = dict(inners=set([DataClass(42), DataClass(2)]))
result = sch.dump(data)
assert len(result.data['inners']) == 2
def test_meta_serializer_fields():
u = User("John", age=42.3, email="john@example.com",
homepage="http://john.com")
result = UserMetaSchema().dump(u)
assert not result.errors
assert result.data['name'] == u.name
assert result.data['balance'] == decimal.Decimal('100.00')
assert result.data['uppername'] == "JOHN"
assert result.data['is_old'] is False
assert result.data['created'] == utils.isoformat(u.created)
assert result.data['updated_local'] == utils.isoformat(u.updated, localtime=True)
assert result.data['finger_count'] == 10
assert result.data['various_data'] == dict(u.various_data)
def test_meta_fields_mapping(user):
s = UserMetaSchema()
s.dump(user) # need to call dump to update fields
assert type(s.fields['name']) == fields.String
assert type(s.fields['created']) == fields.DateTime
assert type(s.fields['updated']) == fields.DateTime
assert type(s.fields['updated_local']) == fields.LocalDateTime
assert type(s.fields['age']) == fields.Float
assert type(s.fields['balance']) == fields.Decimal
assert type(s.fields['registered']) == fields.Boolean
assert type(s.fields['sex_choices']) == fields.Raw
assert type(s.fields['hair_colors']) == fields.Raw
assert type(s.fields['finger_count']) == fields.Integer
assert type(s.fields['uid']) == fields.UUID
assert type(s.fields['time_registered']) == fields.Time
assert type(s.fields['birthdate']) == fields.Date
assert type(s.fields['since_created']) == fields.TimeDelta
def test_meta_field_not_on_obj_raises_attribute_error(user):
class BadUserSchema(Schema):
class Meta:
fields = ('name', 'notfound')
with pytest.raises(AttributeError):
BadUserSchema().dump(user)
def test_exclude_fields(user):
s = UserExcludeSchema().dump(user)
assert "created" not in s.data
assert "updated" not in s.data
assert "name" in s.data
def test_fields_option_must_be_list_or_tuple(user):
with pytest.raises(ValueError):
class BadFields(Schema):
class Meta:
fields = "name"
def test_exclude_option_must_be_list_or_tuple(user):
with pytest.raises(ValueError):
class BadExclude(Schema):
class Meta:
exclude = "name"
def test_dateformat_option(user):
fmt = '%Y-%m'
class DateFormatSchema(Schema):
updated = fields.DateTime("%m-%d")
class Meta:
fields = ('created', 'updated')
dateformat = fmt
serialized = DateFormatSchema().dump(user)
assert serialized.data['created'] == user.created.strftime(fmt)
assert serialized.data['updated'] == user.updated.strftime("%m-%d")
def test_default_dateformat(user):
class DateFormatSchema(Schema):
updated = fields.DateTime(format="%m-%d")
class Meta:
fields = ('created', 'updated')
serialized = DateFormatSchema().dump(user)
assert serialized.data['created'] == utils.isoformat(user.created)
assert serialized.data['updated'] == user.updated.strftime("%m-%d")
def test_inherit_meta(user):
class InheritedMetaSchema(UserMetaSchema):
pass
result = InheritedMetaSchema().dump(user).data
expected = UserMetaSchema().dump(user).data
assert result == expected
def test_inherit_meta_override():
class Parent(Schema):
class Meta:
strict = True
fields = ('name', 'email')
class Child(Schema):
class Meta(Parent.Meta):
strict = False
child = Child()
assert child.opts.fields == ('name', 'email')
assert child.opts.strict is False
def test_additional(user):
s = UserAdditionalSchema().dump(user)
assert s.data['lowername'] == user.name.lower()
assert s.data['name'] == user.name
def test_cant_set_both_additional_and_fields(user):
with pytest.raises(ValueError):
class BadSchema(Schema):
name = fields.String()
class Meta:
fields = ("name", 'email')
additional = ('email', 'homepage')
def test_serializing_none_meta():
s = UserMetaSchema().dump(None)
assert s.data == {}
assert s.errors == {}
class CustomError(Exception):
pass
class MySchema(Schema):
name = fields.String()
email = fields.Email()
age = fields.Integer()
def handle_error(self, errors, obj):
raise CustomError('Something bad happened')
class TestHandleError:
def test_dump_with_custom_error_handler(self, user):
user.age = 'notavalidage'
with pytest.raises(CustomError):
MySchema().dump(user)
def test_dump_with_custom_error_handler_and_strict(self, user):
user.age = 'notavalidage'
with pytest.raises(CustomError):
MySchema(strict=True).dump(user)
def test_load_with_custom_error_handler(self):
in_data = {'email': 'invalid'}
class MySchema3(Schema):
email = fields.Email()
def handle_error(self, error, data):
assert type(error) is ValidationError
assert 'email' in error.messages
assert error.field_names == ['email']
assert error.fields == [self.fields['email']]
assert data == in_data
raise CustomError('Something bad happened')
with pytest.raises(CustomError):
MySchema3().load(in_data)
def test_load_with_custom_error_handler_and_partially_valid_data(self):
in_data = {'email': 'invalid', 'url': 'http://valid.com'}
class MySchema(Schema):
email = fields.Email()
url = fields.URL()
def handle_error(self, error, data):
assert type(error) is ValidationError
assert 'email' in error.messages
assert error.field_names == ['email']
assert error.fields == [self.fields['email']]
assert data == in_data
raise CustomError('Something bad happened')
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_custom_error_handler_with_validates_decorator(self):
in_data = {'num': -1}
class MySchema(Schema):
num = fields.Int()
@validates('num')
def validate_num(self, value):
if value < 0:
raise ValidationError('Must be greater than 0.')
def handle_error(self, error, data):
assert type(error) is ValidationError
assert 'num' in error.messages
assert error.field_names == ['num']
assert error.fields == [self.fields['num']]
assert data == in_data
raise CustomError('Something bad happened')
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_custom_error_handler_with_validates_schema_decorator(self):
in_data = {'num': -1}
class MySchema(Schema):
num = fields.Int()
@validates_schema
def validates_schema(self, data):
raise ValidationError('Invalid schema!')
def handle_error(self, error, data):
assert type(error) is ValidationError
assert '_schema' in error.messages
assert error.field_names == ['_schema']
assert error.fields == []
assert data == in_data
raise CustomError('Something bad happened')
with pytest.raises(CustomError):
MySchema().load(in_data)
def test_validate_with_custom_error_handler(self):
with pytest.raises(CustomError):
MySchema().validate({'age': 'notvalid', 'email': 'invalid'})
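# A minimal sketch (hypothetical schema, not part of the original suite) of the
# hook the tests above exercise: handle_error() receives the ValidationError and
# the original input whenever load()/validate() produces errors.
def _sketch_handle_error_hook():
    seen = {}
    class SketchSchema(Schema):
        age = fields.Integer()
        def handle_error(self, error, data):
            seen['messages'] = error.messages
            seen['data'] = data
    SketchSchema().load({'age': 'not-an-int'})
    assert 'age' in seen['messages']
    assert seen['data'] == {'age': 'not-an-int'}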
class TestFieldValidation:
def test_errors_are_cleared_after_loading_collection(self):
def always_fail(val):
raise ValidationError('lol')
class MySchema(Schema):
foo = fields.Str(validate=always_fail)
schema = MySchema()
_, errors = schema.load([
{'foo': 'bar'},
{'foo': 'baz'}
], many=True)
assert len(errors[0]['foo']) == 1
assert len(errors[1]['foo']) == 1
_, errors2 = schema.load({'foo': 'bar'})
assert len(errors2['foo']) == 1
def test_raises_error_with_list(self):
def validator(val):
raise ValidationError(['err1', 'err2'])
class MySchema(Schema):
foo = fields.Field(validate=validator)
s = MySchema()
errors = s.validate({'foo': 42})
assert errors['foo'] == ['err1', 'err2']
# https://github.com/marshmallow-code/marshmallow/issues/110
def test_raises_error_with_dict(self):
def validator(val):
raise ValidationError({'code': 'invalid_foo'})
class MySchema(Schema):
foo = fields.Field(validate=validator)
s = MySchema()
errors = s.validate({'foo': 42})
assert errors['foo'] == [{'code': 'invalid_foo'}]
def test_ignored_if_not_in_only(self):
class MySchema(Schema):
a = fields.Field()
b = fields.Field()
@validates('a')
def validate_a(self, val):
raise ValidationError({'code': 'invalid_a'})
@validates('b')
def validate_b(self, val):
raise ValidationError({'code': 'invalid_b'})
s = MySchema(only=('b',))
errors = s.validate({'b': 'data'})
assert errors == {'b': {'code': 'invalid_b'}}
def test_schema_repr():
class MySchema(Schema):
name = fields.String()
ser = MySchema(many=True, strict=True)
rep = repr(ser)
assert 'MySchema' in rep
assert 'strict=True' in rep
assert 'many=True' in rep
class TestNestedSchema:
@pytest.fixture
def user(self):
return User(name="Monty", age=81)
@pytest.fixture
def blog(self, user):
col1 = User(name="Mick", age=123)
col2 = User(name="Keith", age=456)
blog = Blog("Monty's blog", user=user, categories=["humor", "violence"],
collaborators=[col1, col2])
return blog
def test_flat_nested(self, blog):
class FlatBlogSchema(Schema):
name = fields.String()
user = fields.Nested(UserSchema, only='name')
collaborators = fields.Nested(UserSchema, only='name', many=True)
s = FlatBlogSchema()
data, _ = s.dump(blog)
assert data['user'] == blog.user.name
for i, name in enumerate(data['collaborators']):
assert name == blog.collaborators[i].name
# regression test for https://github.com/marshmallow-code/marshmallow/issues/64
def test_nested_many_with_missing_attribute(self, user):
class SimpleBlogSchema(Schema):
title = fields.Str()
wat = fields.Nested(UserSchema, many=True)
blog = Blog('Simple blog', user=user, collaborators=None)
schema = SimpleBlogSchema()
result = schema.dump(blog)
assert 'wat' not in result.data
def test_nested_with_attribute_none(self):
class InnerSchema(Schema):
bar = fields.Field()
class MySchema(Schema):
foo = fields.Nested(InnerSchema)
class MySchema2(Schema):
foo = fields.Nested(InnerSchema)
s = MySchema()
result = s.dump({'foo': None})
assert result.data['foo'] is None
s2 = MySchema2()
result2 = s2.dump({'foo': None})
assert result2.data['foo'] is None
def test_flat_nested2(self, blog):
class FlatBlogSchema(Schema):
name = fields.String()
collaborators = fields.Nested(UserSchema, many=True, only='uid')
s = FlatBlogSchema()
data, _ = s.dump(blog)
assert data['collaborators'][0] == str(blog.collaborators[0].uid)
def test_nested_field_does_not_validate_required(self):
class BlogRequiredSchema(Schema):
user = fields.Nested(UserSchema, required=True)
b = Blog('Authorless blog', user=None)
_, errs = BlogRequiredSchema().dump(b)
assert 'user' not in errs
def test_nested_required_errors_with_load_from(self):
class DatesInfoSchema(Schema):
created = fields.DateTime(required=True)
updated = fields.DateTime(required=True)
class UserSimpleSchema(Schema):
name = fields.String(required=True)
time_registered = fields.Time(load_from='timeRegistered', required=True)
dates_info = fields.Nested(DatesInfoSchema, load_from='datesInfo', required=True)
class BlogRequiredSchema(Schema):
user = fields.Nested(UserSimpleSchema, required=True)
_, errs = BlogRequiredSchema().load({})
assert 'timeRegistered' in errs['user']
assert 'datesInfo' in errs['user']
def test_nested_none(self):
class BlogDefaultSchema(Schema):
user = fields.Nested(UserSchema, default=0)
b = Blog('Just the default blog', user=None)
data, _ = BlogDefaultSchema().dump(b)
assert data['user'] is None
def test_nested(self, user, blog):
blog_serializer = BlogSchema()
serialized_blog, _ = blog_serializer.dump(blog)
user_serializer = UserSchema()
serialized_user, _ = user_serializer.dump(user)
assert serialized_blog['user'] == serialized_user
def test_nested_many_fields(self, blog):
serialized_blog, _ = BlogSchema().dump(blog)
expected = [UserSchema().dump(col)[0] for col in blog.collaborators]
assert serialized_blog['collaborators'] == expected
def test_nested_meta_many(self, blog):
serialized_blog = BlogUserMetaSchema().dump(blog)[0]
assert len(serialized_blog['collaborators']) == 2
expected = [UserMetaSchema().dump(col)[0] for col in blog.collaborators]
assert serialized_blog['collaborators'] == expected
def test_nested_only(self, blog):
col1 = User(name="Mick", age=123, id_="abc")
col2 = User(name="Keith", age=456, id_="def")
blog.collaborators = [col1, col2]
serialized_blog = BlogOnlySchema().dump(blog)[0]
assert serialized_blog['collaborators'] == [{"id": col1.id}, {"id": col2.id}]
def test_exclude(self, blog):
serialized = BlogSchemaExclude().dump(blog)[0]
assert "uppername" not in serialized['user'].keys()
def test_list_field(self, blog):
serialized = BlogSchema().dump(blog)[0]
assert serialized['categories'] == ["humor", "violence"]
def test_nested_load_many(self):
in_data = {'title': 'Shine A Light', 'collaborators': [
{'name': 'Mick', 'email': 'mick@stones.com'},
{'name': 'Keith', 'email': 'keith@stones.com'}
]}
data, errors = BlogSchema().load(in_data)
collabs = data['collaborators']
assert len(collabs) == 2
assert all(type(each) == User for each in collabs)
assert collabs[0].name == in_data['collaborators'][0]['name']
def test_nested_errors(self):
_, errors = BlogSchema().load(
{'title': "Monty's blog", 'user': {'name': 'Monty', 'email': 'foo'}}
)
assert "email" in errors['user']
assert len(errors['user']['email']) == 1
assert 'Not a valid email address.' in errors['user']['email'][0]
# No problems with collaborators
assert "collaborators" not in errors
def test_nested_strict(self):
with pytest.raises(ValidationError) as excinfo:
_, errors = BlogSchema(strict=True).load(
{'title': "Monty's blog", 'user': {'name': 'Monty', 'email': 'foo'}}
)
assert 'email' in str(excinfo)
def test_nested_dump_errors(self, blog):
blog.user.email = "foo"
_, errors = BlogSchema().dump(blog)
assert "email" in errors['user']
assert len(errors['user']['email']) == 1
assert 'Not a valid email address.' in errors['user']['email'][0]
# No problems with collaborators
assert "collaborators" not in errors
def test_nested_dump_strict(self, blog):
blog.user.email = "foo"
with pytest.raises(ValidationError) as excinfo:
_, errors = BlogSchema(strict=True).dump(blog)
assert 'email' in str(excinfo)
def test_nested_method_field(self, blog):
data = BlogSchema().dump(blog)[0]
assert data['user']['is_old']
assert data['collaborators'][0]['is_old']
def test_nested_function_field(self, blog, user):
data = BlogSchema().dump(blog)[0]
assert data['user']['lowername'] == user.name.lower()
expected = blog.collaborators[0].name.lower()
assert data['collaborators'][0]['lowername'] == expected
def test_nested_prefixed_field(self, blog, user):
data = BlogSchemaPrefixedUser().dump(blog)[0]
assert data['user']['usr_name'] == user.name
assert data['user']['usr_lowername'] == user.name.lower()
def test_nested_prefixed_many_field(self, blog):
data = BlogSchemaPrefixedUser().dump(blog)[0]
assert data['collaborators'][0]['usr_name'] == blog.collaborators[0].name
def test_invalid_float_field(self):
user = User("Joe", age="1b2")
_, errors = UserSchema().dump(user)
assert "age" in errors
def test_serializer_meta_with_nested_fields(self, blog, user):
data = BlogSchemaMeta().dump(blog)[0]
assert data['title'] == blog.title
assert data['user'] == UserSchema().dump(user).data
assert data['collaborators'] == [UserSchema().dump(c).data
for c in blog.collaborators]
assert data['categories'] == blog.categories
def test_serializer_with_nested_meta_fields(self, blog):
# Schema has user = fields.Nested(UserMetaSerializer)
s = BlogUserMetaSchema().dump(blog)
assert s.data['user'] == UserMetaSchema().dump(blog.user).data
def test_nested_fields_must_be_passed_a_serializer(self, blog):
class BadNestedFieldSchema(BlogSchema):
user = fields.Nested(fields.String)
with pytest.raises(ValueError):
BadNestedFieldSchema().dump(blog)
# regression test for https://github.com/marshmallow-code/marshmallow/issues/188
def test_invalid_type_passed_to_nested_field(self):
class InnerSchema(Schema):
foo = fields.Field()
class MySchema(Schema):
inner = fields.Nested(InnerSchema, many=True)
sch = MySchema()
result = sch.load({'inner': [{'foo': 42}]})
assert not result.errors
result = sch.load({'inner': 'invalid'})
assert 'inner' in result.errors
assert result.errors['inner'] == ['Invalid type.']
class OuterSchema(Schema):
inner = fields.Nested(InnerSchema)
schema = OuterSchema()
_, errors = schema.load({'inner': 1})
assert errors['inner']['_schema'] == ['Invalid input type.']
# regression test for https://github.com/marshmallow-code/marshmallow/issues/298
def test_all_errors_on_many_nested_field_with_validates_decorator(self):
class Inner(Schema):
req = fields.Field(required=True)
class Outer(Schema):
inner = fields.Nested(Inner, many=True)
@validates('inner')
def validates_inner(self, data):
raise ValidationError('not a chance')
outer = Outer()
_, errors = outer.load({'inner': [{}]})
assert 'inner' in errors
assert '_field' in errors['inner']
def test_missing_required_nested_field(self):
class Inner(Schema):
inner_req = fields.Field(required=True, error_messages={'required': 'Oops'})
inner_not_req = fields.Field()
inner_bad = fields.Integer(required=True, error_messages={'required': 'Int plz'})
class Middle(Schema):
middle_many_req = fields.Nested(Inner, required=True, many=True)
middle_req_2 = fields.Nested(Inner, required=True)
middle_not_req = fields.Nested(Inner)
middle_field = fields.Field(required=True, error_messages={'required': 'middlin'})
class Outer(Schema):
outer_req = fields.Nested(Middle, required=True)
outer_many_req = fields.Nested(Middle, required=True, many=True)
outer_not_req = fields.Nested(Middle)
outer_many_not_req = fields.Nested(Middle, many=True)
outer = Outer()
expected = {
'outer_many_req': {0: {'middle_many_req': {0: {'inner_bad': ['Int plz'],
'inner_req': ['Oops']}},
'middle_req_2': {'inner_bad': ['Int plz'],
'inner_req': ['Oops']},
'middle_field': ['middlin']}},
'outer_req': {'middle_field': ['middlin'],
'middle_many_req': {0: {'inner_bad': ['Int plz'],
'inner_req': ['Oops']}},
'middle_req_2': {'inner_bad': ['Int plz'],
'inner_req': ['Oops']}}}
data, errors = outer.load({})
assert errors == expected
class TestSelfReference:
@pytest.fixture
def employer(self):
return User(name="Joe", age=59)
@pytest.fixture
def user(self, employer):
return User(name="Tom", employer=employer, age=28)
def test_nesting_schema_within_itself(self, user, employer):
class SelfSchema(Schema):
name = fields.String()
age = fields.Integer()
employer = fields.Nested('self', exclude=('employer', ))
data, errors = SelfSchema().dump(user)
assert not errors
assert data['name'] == user.name
assert data['employer']['name'] == employer.name
assert data['employer']['age'] == employer.age
def test_nesting_schema_by_passing_class_name(self, user, employer):
class SelfReferencingSchema(Schema):
name = fields.Str()
age = fields.Int()
employer = fields.Nested('SelfReferencingSchema', exclude=('employer',))
data, errors = SelfReferencingSchema().dump(user)
assert not errors
assert data['name'] == user.name
assert data['employer']['name'] == employer.name
assert data['employer']['age'] == employer.age
def test_nesting_within_itself_meta(self, user, employer):
class SelfSchema(Schema):
employer = fields.Nested("self", exclude=('employer', ))
class Meta:
additional = ('name', 'age')
data, errors = SelfSchema().dump(user)
assert not errors
assert data['name'] == user.name
assert data['age'] == user.age
assert data['employer']['name'] == employer.name
assert data['employer']['age'] == employer.age
def test_recursive_missing_required_field(self):
class BasicSchema(Schema):
sub_basics = fields.Nested("self", required=True)
data, errors = BasicSchema().load({})
assert data == {}
assert errors == {
'sub_basics': ['Missing data for required field.']
}
def test_recursive_missing_required_field_one_level_in(self):
class BasicSchema(Schema):
sub_basics = fields.Nested("self", required=True, exclude=('sub_basics', ))
simple_field = fields.Str(required=True)
class DeepSchema(Schema):
basic = fields.Nested(BasicSchema(), required=True)
data, errors = DeepSchema().load({})
assert data == {}
assert errors == {
'basic': {
'sub_basics': [u'Missing data for required field.'],
'simple_field': [u'Missing data for required field.'],
}
}
partially_valid = {
'basic': {'sub_basics': {'simple_field': 'foo'}}
}
data, errors = DeepSchema().load(partially_valid)
assert data == partially_valid
assert errors == {
'basic': {
'simple_field': [u'Missing data for required field.'],
}
}
partially_valid2 = {
'basic': {'simple_field': 'foo'}
}
data, errors = DeepSchema().load(partially_valid2)
assert data == partially_valid2
assert errors == {
'basic': {
'sub_basics': ['Missing data for required field.'],
}
}
def test_nested_self_with_only_param(self, user, employer):
class SelfSchema(Schema):
employer = fields.Nested('self', only=('name', ))
class Meta:
fields = ('name', 'employer')
data = SelfSchema().dump(user)[0]
assert data['name'] == user.name
assert data['employer']['name'] == employer.name
assert 'age' not in data['employer']
def test_multiple_nested_self_fields(self, user):
class MultipleSelfSchema(Schema):
emp = fields.Nested('self', only='name', attribute='employer')
rels = fields.Nested('self', only='name',
many=True, attribute='relatives')
class Meta:
fields = ('name', 'emp', 'rels')
schema = MultipleSelfSchema()
user.relatives = [User(name="Bar", age=12), User(name='Baz', age=34)]
data, errors = schema.dump(user)
assert not errors
assert len(data['rels']) == len(user.relatives)
relative = data['rels'][0]
assert relative == user.relatives[0].name
def test_nested_many(self):
class SelfManySchema(Schema):
relatives = fields.Nested('self', many=True)
class Meta:
additional = ('name', 'age')
person = User(name='Foo')
person.relatives = [User(name="Bar", age=12), User(name='Baz', age=34)]
data = SelfManySchema().dump(person)[0]
assert data['name'] == person.name
assert len(data['relatives']) == len(person.relatives)
assert data['relatives'][0]['name'] == person.relatives[0].name
assert data['relatives'][0]['age'] == person.relatives[0].age
class RequiredUserSchema(Schema):
name = fields.Field(required=True)
def test_serialization_with_required_field():
user = User(name=None)
data, errors = RequiredUserSchema().dump(user)
# Does not validate required
assert 'name' not in errors
def test_deserialization_with_required_field():
in_data = {}
data, errors = RequiredUserSchema().load(in_data)
assert 'name' in errors
assert 'Missing data for required field.' in errors['name']
# field value should also not be in output data
assert 'name' not in data
def test_deserialization_with_required_field_and_custom_validator():
class ValidatingSchema(Schema):
color = fields.String(required=True,
validate=lambda x: x.lower() == 'red' or x.lower() == 'blue',
error_messages={
'validator_failed': "Color must be red or blue"})
data, errors = ValidatingSchema().load({'name': 'foo'})
assert errors
assert 'color' in errors
assert "Missing data for required field." in errors['color']
_, errors = ValidatingSchema().load({'color': 'green'})
assert 'color' in errors
assert "Color must be red or blue" in errors['color']
class UserContextSchema(Schema):
is_owner = fields.Method('get_is_owner')
is_collab = fields.Function(lambda user, ctx: user in ctx['blog'])
def get_is_owner(self, user):
return self.context['blog'].user.name == user.name
class TestContext:
def test_context_method(self):
owner = User('Joe')
blog = Blog(title='Joe Blog', user=owner)
context = {'blog': blog}
serializer = UserContextSchema()
serializer.context = context
data = serializer.dump(owner)[0]
assert data['is_owner'] is True
nonowner = User('Fred')
data = serializer.dump(nonowner)[0]
assert data['is_owner'] is False
def test_context_method_function(self):
owner = User('Fred')
blog = Blog('Killer Queen', user=owner)
collab = User('Brian')
blog.collaborators.append(collab)
context = {'blog': blog}
serializer = UserContextSchema()
serializer.context = context
data = serializer.dump(collab)[0]
assert data['is_collab'] is True
noncollab = User('Foo')
data = serializer.dump(noncollab)[0]
assert data['is_collab'] is False
def test_function_field_raises_error_when_context_not_available(self):
# only has a function field
class UserFunctionContextSchema(Schema):
is_collab = fields.Function(lambda user, ctx: user in ctx['blog'])
owner = User('Joe')
serializer = UserFunctionContextSchema(strict=True)
# no context
serializer.context = None
with pytest.raises(ValidationError) as excinfo:
serializer.dump(owner)
msg = 'No context available for Function field {0!r}'.format('is_collab')
assert msg in str(excinfo)
def test_function_field_handles_bound_serializer(self):
class SerializeA(object):
def __call__(self, value):
return 'value'
serialize = SerializeA()
# only has a function field
class UserFunctionContextSchema(Schema):
is_collab = fields.Function(serialize)
owner = User('Joe')
serializer = UserFunctionContextSchema(strict=True)
# no context
serializer.context = None
data = serializer.dump(owner)[0]
        assert data['is_collab'] == 'value'
def test_fields_context(self):
class CSchema(Schema):
name = fields.String()
ser = CSchema()
ser.context['foo'] = 42
assert ser.fields['name'].context == {'foo': 42}
def test_nested_fields_inherit_context(self):
class InnerSchema(Schema):
likes_bikes = fields.Function(lambda obj, ctx: 'bikes' in ctx['info'])
class CSchema(Schema):
inner = fields.Nested(InnerSchema)
ser = CSchema(strict=True)
ser.context['info'] = 'i like bikes'
obj = {
'inner': {}
}
result = ser.dump(obj)
assert result.data['inner']['likes_bikes'] is True
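# A minimal sketch (hypothetical schemas, not part of the original suite) of the
# same propagation: context set on the parent schema is visible to Function
# fields declared on a nested schema at dump time.
def _sketch_nested_context_propagation():
    class InnerSketch(Schema):
        flag = fields.Function(lambda obj, ctx: ctx['flag'])
    class OuterSketch(Schema):
        inner = fields.Nested(InnerSketch)
    ser = OuterSketch()
    ser.context['flag'] = True
    assert ser.dump({'inner': {}}).data['inner']['flag'] is True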
def test_serializer_can_specify_nested_object_as_attribute(blog):
class BlogUsernameSchema(Schema):
author_name = fields.String(attribute='user.name')
ser = BlogUsernameSchema()
result = ser.dump(blog)
assert result.data['author_name'] == blog.user.name
class TestFieldInheritance:
def test_inherit_fields_from_schema_subclass(self):
expected = OrderedDict([
('field_a', fields.Number()),
('field_b', fields.Number()),
])
class SerializerA(Schema):
field_a = expected['field_a']
class SerializerB(SerializerA):
field_b = expected['field_b']
assert SerializerB._declared_fields == expected
def test_inherit_fields_from_non_schema_subclass(self):
expected = OrderedDict([
('field_a', fields.Number()),
('field_b', fields.Number()),
])
class PlainBaseClass(object):
field_a = expected['field_a']
class SerializerB1(Schema, PlainBaseClass):
field_b = expected['field_b']
class SerializerB2(PlainBaseClass, Schema):
field_b = expected['field_b']
assert SerializerB1._declared_fields == expected
assert SerializerB2._declared_fields == expected
def test_inheritance_follows_mro(self):
expected = OrderedDict([
('field_a', fields.String()),
('field_c', fields.String()),
('field_b', fields.String()),
('field_d', fields.String()),
])
# Diamond inheritance graph
# MRO: D -> B -> C -> A
class SerializerA(Schema):
field_a = expected['field_a']
class SerializerB(SerializerA):
field_b = expected['field_b']
class SerializerC(SerializerA):
field_c = expected['field_c']
class SerializerD(SerializerB, SerializerC):
field_d = expected['field_d']
assert SerializerD._declared_fields == expected
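# A minimal sketch (hypothetical classes, not part of the original suite) of the
# same machinery: fields declared on a plain mixin are collected by the schema
# metaclass just like fields declared on a Schema base class.
def _sketch_mixin_field_inheritance():
    class NameMixin(object):
        name = fields.String()
    class SketchSchema(Schema, NameMixin):
        email = fields.Email()
    assert set(SketchSchema._declared_fields) == {'name', 'email'}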
def get_from_dict(schema, obj, key, default=None):
return obj.get('_' + key, default)
class TestGetAttribute:
def test_get_attribute_is_used(self):
class UserDictSchema(Schema):
name = fields.Str()
email = fields.Email()
def get_attribute(self, obj, attr, default):
return get_from_dict(self, obj, attr, default)
user_dict = {'_name': 'joe', '_email': 'joe@shmoe.com'}
schema = UserDictSchema()
result = schema.dump(user_dict)
assert result.data['name'] == user_dict['_name']
assert result.data['email'] == user_dict['_email']
assert not result.errors
# can't serialize User object
user = User(name='joe', email='joe@shmoe.com')
with pytest.raises(AttributeError):
schema.dump(user)
def test_get_attribute_with_many(self):
class UserDictSchema(Schema):
name = fields.Str()
email = fields.Email()
def get_attribute(self, obj, attr, default):
return get_from_dict(self, obj, attr, default)
user_dicts = [{'_name': 'joe', '_email': 'joe@shmoe.com'},
{'_name': 'jane', '_email': 'jane@shmane.com'}]
schema = UserDictSchema(many=True)
results = schema.dump(user_dicts)
for result, user_dict in zip(results.data, user_dicts):
assert result['name'] == user_dict['_name']
assert result['email'] == user_dict['_email']
assert not results.errors
# can't serialize User object
users = [User(name='joe', email='joe@shmoe.com'),
User(name='jane', email='jane@shmane.com')]
with pytest.raises(AttributeError):
schema.dump(users)
class TestRequiredFields:
class StringSchema(Schema):
required_field = fields.Str(required=True)
allow_none_field = fields.Str(allow_none=True)
allow_none_required_field = fields.Str(required=True, allow_none=True)
@pytest.fixture()
def string_schema(self):
return self.StringSchema()
@pytest.fixture()
def data(self):
return dict(
required_field='foo',
allow_none_field='bar',
allow_none_required_field='one',
)
def test_required_string_field_missing(self, string_schema, data):
del data['required_field']
errors = string_schema.validate(data)
assert errors['required_field'] == ['Missing data for required field.']
def test_required_string_field_failure(self, string_schema, data):
data['required_field'] = None
errors = string_schema.validate(data)
assert errors['required_field'] == ['Field may not be null.']
def test_allow_none_param(self, string_schema, data):
data['allow_none_field'] = None
errors = string_schema.validate(data)
assert 'allow_none_field' not in errors
data['allow_none_required_field'] = None
errors = string_schema.validate(data)
assert 'allow_none_required_field' not in errors
del data['allow_none_required_field']
errors = string_schema.validate(data)
assert 'allow_none_required_field' in errors
def test_allow_none_custom_message(self, data):
class MySchema(Schema):
allow_none_field = fields.Field(allow_none=False,
error_messages={'null': '<custom>'})
schema = MySchema()
errors = schema.validate({'allow_none_field': None})
assert errors['allow_none_field'][0] == '<custom>'
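# A minimal sketch (outside the fixtures above, not part of the original suite)
# of the distinction this class exercises: ``required`` governs whether the key
# must be present at all, while ``allow_none`` governs whether an explicit None
# value passes validation.
def _sketch_required_vs_allow_none():
    class SketchSchema(Schema):
        field = fields.Str(required=True, allow_none=True)
    assert SketchSchema().validate({'field': None}) == {}
    assert 'field' in SketchSchema().validate({})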
class TestDefaults:
class MySchema(Schema):
int_no_default = fields.Int(allow_none=True)
str_no_default = fields.Str(allow_none=True)
list_no_default = fields.List(fields.Str, allow_none=True)
nested_no_default = fields.Nested(UserSchema, many=True, allow_none=True)
int_with_default = fields.Int(allow_none=True, default=42)
str_with_default = fields.Str(allow_none=True, default='foo')
@pytest.fixture()
def schema(self):
return self.MySchema()
@pytest.fixture()
def data(self):
return dict(
int_no_default=None,
str_no_default=None,
list_no_default=None,
nested_no_default=None,
int_with_default=None,
str_with_default=None,
)
def test_missing_inputs_are_excluded_from_dump_output(self, schema, data):
for key in ['int_no_default', 'str_no_default',
'list_no_default', 'nested_no_default']:
d = data.copy()
del d[key]
result = schema.dump(d)
# the missing key is not in the serialized result
assert key not in result.data
# the rest of the keys are in the result.data
assert all(k in result.data for k in d.keys())
def test_none_is_serialized_to_none(self, schema, data):
assert schema.validate(data) == {}
result = schema.dump(data)
for key in data.keys():
msg = 'result.data[{0!r}] should be None'.format(key)
assert result.data[key] is None, msg
def test_default_and_value_missing(self, schema, data):
del data['int_with_default']
del data['str_with_default']
result = schema.dump(data)
assert result.data['int_with_default'] == 42
assert result.data['str_with_default'] == 'foo'
def test_loading_none(self, schema, data):
result = schema.load(data)
assert not result.errors
for key in data.keys():
            assert result.data[key] is None
def test_missing_inputs_are_excluded_from_load_output(self, schema, data):
for key in ['int_no_default', 'str_no_default',
'list_no_default', 'nested_no_default']:
d = data.copy()
del d[key]
result = schema.load(d)
# the missing key is not in the deserialized result
assert key not in result.data
# the rest of the keys are in the result.data
assert all(k in result.data for k in d.keys())
class TestLoadOnly:
class MySchema(Schema):
class Meta:
load_only = ('str_load_only',)
dump_only = ('str_dump_only',)
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
@pytest.fixture()
def schema(self):
return self.MySchema()
@pytest.fixture()
def data(self):
return dict(
str_dump_only='Dump Only',
str_load_only='Load Only',
str_regular='Regular String')
def test_load_only(self, schema, data):
result = schema.dump(data)
assert not result.errors
assert 'str_load_only' not in result.data
assert 'str_dump_only' in result.data
assert 'str_regular' in result.data
def test_dump_only(self, schema, data):
result = schema.load(data)
assert not result.errors
assert 'str_dump_only' not in result.data
assert 'str_load_only' in result.data
assert 'str_regular' in result.data
class TestStrictDefault:
class SchemaTrueByMeta(Schema):
class Meta:
strict = True
class SchemaFalseByMeta(Schema):
class Meta:
strict = False
class SchemaWithoutMeta(Schema):
pass
def test_default(self):
assert self.SchemaWithoutMeta().strict is False
def test_meta_true(self):
assert self.SchemaTrueByMeta().strict is True
def test_meta_false(self):
assert self.SchemaFalseByMeta().strict is False
def test_default_init_true(self):
assert self.SchemaWithoutMeta(strict=True).strict is True
def test_default_init_false(self):
assert self.SchemaWithoutMeta(strict=False).strict is False
def test_meta_true_init_true(self):
assert self.SchemaTrueByMeta(strict=True).strict is True
def test_meta_true_init_false(self):
assert self.SchemaTrueByMeta(strict=False).strict is False
def test_meta_false_init_true(self):
assert self.SchemaFalseByMeta(strict=True).strict is True
def test_meta_false_init_false(self):
assert self.SchemaFalseByMeta(strict=False).strict is False
|
xLegoz/marshmallow
|
tests/test_schema.py
|
Python
|
mit
| 76,291
|
[
"Brian"
] |
fe7e6324e5986ea2a24317fa578b0fab840fc353074f51102216631b04bd4822
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
args = parser.parse_args()
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
    # Replace all occurrences of the date regex (2014-2018) with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
p = regexs["kubernetes_copyright"]
for i, d in enumerate(data):
(data[i], found) = p.subn('COPYRIGHTHOLDER', d)
if found != 0:
break
p = regexs["redhat_copyright"]
for i, d in enumerate(data):
(data[i], found) = p.subn('COPYRIGHTHOLDER', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
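# A minimal sketch (not in the original script) of the values file_passes()
# relies on when picking a reference header: the extension without the dot,
# lower-cased, or the empty string for extensionless files.
def _file_extension_examples():
    assert file_extension("pkg/util.go") == "go"
    assert file_extension("hack/lib.sh") == "sh"
    assert file_extension("Makefile") == ""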
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', "vendor", "out"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014 to 2018, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for Kubernetes copyright notice
regexs["kubernetes_copyright"] = re.compile(r"(Copyright YEAR The Kubernetes Authors All rights reserved)\n*", re.MULTILINE)
# Search for Red Hat copyright notice
regexs["redhat_copyright"] = re.compile(r"(Copyright \(C\) YEAR Red Hat, Inc)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
jimmidyson/minishift
|
scripts/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 5,868
|
[
"VisIt"
] |
c537b2fb196549fa939f7ebd14037a38d3e5356669b46b662fe994ff3cfc4c1b
|
#!/usr/bin/env python
# CHANGED:2015-03-05 17:53:32 by Brian McFee <brian.mcfee@nyu.edu>
"""Test the util module"""
import tempfile
import os
import pytest
import numpy as np
from jams import core, util
import six
def srand(seed=628318530):
np.random.seed(seed)
pass
@pytest.mark.parametrize('ns, lab, ints, y, infer_duration',
[('beat',
"1.0 1\n3.0 2",
np.array([[1.0, 3.0], [3.0, 3.0]]),
[1, 2],
True),
('beat',
"1.0 1\n3.0 2",
np.array([[1.0, 1.0], [3.0, 3.0]]),
[1, 2],
False),
('chord_harte',
"1.0 2.0 a\n2.0 4.0 b",
np.array([[1.0, 2.0], [2.0, 4.0]]),
['a', 'b'],
True),
('chord',
"1.0 1.0 c\n2.0 2.0 d",
np.array([[1.0, 2.0], [2.0, 4.0]]),
['c', 'd'],
False)])
def test_import_lab(ns, lab, ints, y, infer_duration):
ann = util.import_lab(ns, six.StringIO(lab),
infer_duration=infer_duration)
assert len(ints) == len(ann.data)
assert len(y) == len(ann.data)
for yi, ival, obs in zip(y, ints, ann):
assert obs.time == ival[0]
assert obs.duration == ival[1] - ival[0]
assert obs.value == yi
@pytest.mark.parametrize('query, prefix, sep, target',
[('al.beta.gamma', 'al', '.', 'beta.gamma'),
('al/beta/gamma', 'al', '/', 'beta/gamma'),
('al.beta.gamma', 'beta', '.', 'al.beta.gamma'),
('al.beta.gamma', 'beta', '/', 'al.beta.gamma'),
('al.pha.beta.gamma', 'al', '.', 'pha.beta.gamma')])
def test_query_pop(query, prefix, sep, target):
assert target == core.query_pop(query, prefix, sep=sep)
@pytest.mark.parametrize('needle, haystack, result',
[('abcdeABCDE123', 'abcdeABCDE123', True),
('.*cde.*', 'abcdeABCDE123', True),
('cde$', 'abcdeABCDE123', False),
(r'.*\d+$', 'abcdeABCDE123', True),
(r'^\d+$', 'abcdeABCDE123', False),
(lambda x: True, 'abcdeABCDE123', True),
(lambda x: False, 'abcdeABCDE123', False),
(5, 5, True),
(5, 4, False)])
def test_match_query(needle, haystack, result):
assert result == core.match_query(haystack, needle)
def test_smkdirs():
root = tempfile.mkdtemp()
my_dirs = [root, 'level1', 'level2', 'level3']
try:
target = os.sep.join(my_dirs)
util.smkdirs(target)
for i in range(1, len(my_dirs)):
tmpdir = os.sep.join(my_dirs[:i])
assert os.path.exists(tmpdir)
assert os.path.isdir(tmpdir)
finally:
for i in range(len(my_dirs), 0, -1):
tmpdir = os.sep.join(my_dirs[:i])
os.rmdir(tmpdir)
@pytest.mark.parametrize('query, target',
[('foo', 'foo'),
('foo.txt', 'foo'),
('/path/to/foo.txt', 'foo'),
('/path/to/foo', 'foo')])
def test_filebase(query, target):
assert target == util.filebase(query)
@pytest.fixture
def root_and_files():
root = tempfile.mkdtemp()
files = [[root, 'file1.txt'],
[root, 'sub1', 'file2.txt'],
[root, 'sub1', 'sub2', 'file3.txt'],
[root, 'sub1', 'sub2', 'sub3', 'file4.txt']]
files = [os.sep.join(_) for _ in files]
badfiles = [_.replace('.txt', '.csv') for _ in files]
# Create all the necessary directories
util.smkdirs(os.path.dirname(files[-1]))
# Create the dummy files
for fname in files + badfiles:
with open(fname, 'w'):
pass
yield root, files
for fname, badfname in zip(files[::-1], badfiles[::-1]):
os.remove(fname)
os.remove(badfname)
os.rmdir(os.path.dirname(fname))
@pytest.mark.parametrize('level', [1, 2, 3, 4])
@pytest.mark.parametrize('sort', [False, True])
def test_find_with_extension(root_and_files, level, sort):
root, files = root_and_files
results = util.find_with_extension(root, 'txt', depth=level, sort=sort)
assert sorted(results) == sorted(files[:level])
def test_expand_filepaths():
targets = ['foo.bar', 'dir/file.txt', 'dir2///file2.txt', '/q.bin']
target_dir = '/tmp'
paths = util.expand_filepaths(target_dir, targets)
for search, result in zip(targets, paths):
assert result == os.path.normpath(os.path.join(target_dir, search))
|
marl/jams
|
tests/test_util.py
|
Python
|
isc
| 5,008
|
[
"Brian"
] |
09dc1890462f3f9119e1a24063d5cab21fa25c5582b11e288cfc5f64dc81d986
|
# encoding: utf-8
'''Functional tests for CKAN's builtin page view tracking feature.'''
import tempfile
import csv
import datetime
import routes
import ckan.tests.legacy as tests
class TestTracking(object):
def tearDown(self):
import ckan.model as model
model.repo.rebuild_db()
def _get_app(self):
import paste.fixture
import pylons.test
return paste.fixture.TestApp(pylons.test.pylonsapp)
def _create_sysadmin(self, app):
'''Create a sysadmin user.
Returns a tuple (sysadmin_user_object, api_key).
'''
# You can't create a user via the api
# (ckan.auth.create_user_via_api = false is in test-core.ini) and you
# can't make your first sysadmin user via either the api or the web
# interface anyway, so access the model directly to make a sysadmin
# user.
import ckan.model as model
user = model.User(name='joeadmin', email='joe@admin.net',
password='joe rules')
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
return (tests.call_action_api(app, 'user_show', id=user.id),
user.apikey)
def _create_package(self, app, apikey, name='look_to_windward'):
'''Create a package via the action api.'''
return tests.call_action_api(app, 'package_create', apikey=apikey,
name=name)
def _create_resource(self, app, package, apikey):
'''Create a resource via the action api.'''
return tests.call_action_api(app, 'resource_create', apikey=apikey,
package_id=package['id'],
url='http://example.com')
def _post_to_tracking(self, app, url, type_='page', ip='199.204.138.90',
browser='firefox'):
'''Post some data to /_tracking directly.
        This simulates what's supposed to happen when you view a page with
        tracking enabled (an ajax request posts to /_tracking).
'''
params = {'url': url, 'type': type_}
extra_environ = {
# The tracking middleware crashes if these aren't present.
'HTTP_USER_AGENT': browser,
'REMOTE_ADDR': ip,
'HTTP_ACCEPT_LANGUAGE': 'en',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
}
app.post('/_tracking', params=params, extra_environ=extra_environ)
def _update_tracking_summary(self):
'''Update CKAN's tracking summary data.
This simulates calling `paster tracking update` on the command line.
'''
# FIXME: Can this be done as more of a functional test where we
# actually test calling the command and passing the args? By calling
# the method directly, we're not testing the command-line parsing.
import ckan.lib.cli
import ckan.model
date = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime(
'%Y-%m-%d')
ckan.lib.cli.Tracking('Tracking').update_all(
engine=ckan.model.meta.engine, start_date=date)
def _rebuild_search_index(self):
'''Rebuild CKAN's search index.
This simulates calling `paster search-index rebuild` on the command
line.
'''
import ckan.lib.cli
ckan.lib.cli.SearchIndexCommand('SearchIndexCommand').rebuild()
def test_package_with_0_views(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
# The API should return 0 recent views and 0 total views for the
# unviewed package.
package = tests.call_action_api(app, 'package_show',
id=package['name'],
include_tracking=True)
tracking_summary = package['tracking_summary']
assert tracking_summary['recent'] == 0, ("A package that has not "
"been viewed should have 0 "
"recent views")
assert tracking_summary['total'] == 0, ("A package that has not "
"been viewed should have 0 "
"total views")
def test_resource_with_0_views(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
resource = self._create_resource(app, package, apikey)
# The package_show() API should return 0 recent views and 0 total
# views for the unviewed resource.
package = tests.call_action_api(app, 'package_show',
id=package['name'],
include_tracking=True)
assert len(package['resources']) == 1
resource = package['resources'][0]
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 0, ("A resource that has not "
"been viewed should have 0 "
"recent views")
assert tracking_summary['total'] == 0, ("A resource that has not "
"been viewed should have 0 "
"total views")
# The resource_show() API should return 0 recent views and 0 total
# views for the unviewed resource.
resource = tests.call_action_api(app, 'resource_show',
id=resource['id'],
include_tracking=True)
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 0, ("A resource that has not "
"been viewed should have 0 "
"recent views")
assert tracking_summary['total'] == 0, ("A resource that has not "
"been viewed should have 0 "
"total views")
def test_package_with_one_view(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
self._create_resource(app, package, apikey)
url = routes.url_for(controller='package', action='read',
id=package['name'])
self._post_to_tracking(app, url)
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
tracking_summary = package['tracking_summary']
assert tracking_summary['recent'] == 1, ("A package that has been "
"viewed once should have 1 "
"recent view.")
assert tracking_summary['total'] == 1, ("A package that has been "
"viewed once should have 1 "
"total view")
assert len(package['resources']) == 1
resource = package['resources'][0]
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 0, ("Viewing a package should "
"not increase the recent "
"views of the package's "
"resources")
assert tracking_summary['total'] == 0, ("Viewing a package should "
"not increase the total views "
"of the package's resources")
def test_resource_with_one_preview(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
resource = self._create_resource(app, package, apikey)
url = routes.url_for(controller='package', action='resource_read',
id=package['name'], resource_id=resource['id'])
self._post_to_tracking(app, url)
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
assert len(package['resources']) == 1
resource = package['resources'][0]
assert package['tracking_summary']['recent'] == 0, ("Previewing a "
"resource should "
"not increase the "
"package's recent "
"views")
assert package['tracking_summary']['total'] == 0, ("Previewing a "
"resource should "
"not increase the "
"package's total "
"views")
# Yes, previewing a resource does _not_ increase its view count.
assert resource['tracking_summary']['recent'] == 0, ("Previewing a "
"resource should "
"not increase "
"the resource's "
"recent views")
assert resource['tracking_summary']['total'] == 0, ("Previewing a "
"resource should "
"not increase the "
"resource's "
"recent views")
def test_resource_with_one_download(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
resource = self._create_resource(app, package, apikey)
self._post_to_tracking(app, resource['url'], type_='resource')
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
assert len(package['resources']) == 1
resource = package['resources'][0]
assert package['tracking_summary']['recent'] == 0, (
"Downloading a resource should not increase the package's recent "
"views")
assert package['tracking_summary']['total'] == 0, (
"Downloading a resource should not increase the package's total "
"views")
assert resource['tracking_summary']['recent'] == 1, (
"Downloading a resource should increase the resource's recent "
"views")
assert resource['tracking_summary']['total'] == 1, (
"Downloading a resource should increase the resource's total "
"views")
# The resource_show() API should return the same result.
resource = tests.call_action_api(app, 'resource_show',
id=resource['id'],
include_tracking=True)
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 1, (
"Downloading a resource should increase the resource's recent "
"views")
assert tracking_summary['total'] == 1, (
"Downloading a resource should increase the resource's total "
"views")
def test_view_page(self):
app = self._get_app()
# Visit the front page.
self._post_to_tracking(app, url='', type_='page')
# Visit the /organization page.
self._post_to_tracking(app, url='/organization', type_='page')
# Visit the /about page.
self._post_to_tracking(app, url='/about', type_='page')
self._update_tracking_summary()
# There's no way to export page-view (as opposed to resource or
        # dataset) tracking summaries, e.g. via the api or a paster command, so
        # the only way we can check them is through the model directly.
import ckan.model as model
for url in ('', '/organization', '/about'):
q = model.Session.query(model.TrackingSummary)
q = q.filter_by(url=url)
tracking_summary = q.one()
assert tracking_summary.count == 1, ("Viewing a page should "
"increase the page's view "
"count")
            # For pages (as opposed to datasets and resources) recent_views and
            # running_total always stay at 0. Shrug.
assert tracking_summary.recent_views == 0, (
"recent_views for a page is always 0")
assert tracking_summary.running_total == 0, (
"running_total for a page is always 0")
def test_package_with_many_views(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
self._create_resource(app, package, apikey)
url = routes.url_for(controller='package', action='read',
id=package['name'])
# View the package three times from different IPs.
self._post_to_tracking(app, url, ip='111.222.333.44')
self._post_to_tracking(app, url, ip='111.222.333.55')
self._post_to_tracking(app, url, ip='111.222.333.66')
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
tracking_summary = package['tracking_summary']
assert tracking_summary['recent'] == 3, (
"A package that has been viewed 3 times recently should have 3 "
"recent views")
assert tracking_summary['total'] == 3, (
"A package that has been viewed 3 times should have 3 total views")
assert len(package['resources']) == 1
resource = package['resources'][0]
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 0, (
"Viewing a package should not increase the recent views of the "
"package's resources")
assert tracking_summary['total'] == 0, (
"Viewing a package should not increase the total views of the "
"package's resources")
def test_resource_with_many_downloads(self):
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
resource = self._create_resource(app, package, apikey)
url = resource['url']
# Download the resource three times from different IPs.
self._post_to_tracking(app, url, type_='resource', ip='111.222.333.44')
self._post_to_tracking(app, url, type_='resource', ip='111.222.333.55')
self._post_to_tracking(app, url, type_='resource', ip='111.222.333.66')
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
assert len(package['resources']) == 1
resource = package['resources'][0]
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 3, (
"A resource that has been downloaded 3 times recently should have "
"3 recent downloads")
assert tracking_summary['total'] == 3, (
"A resource that has been downloaded 3 times should have 3 total "
"downloads")
tracking_summary = package['tracking_summary']
assert tracking_summary['recent'] == 0, (
"Downloading a resource should not increase the resource's "
"package's recent views")
assert tracking_summary['total'] == 0, (
"Downloading a resource should not increase the resource's "
"package's total views")
def test_page_with_many_views(self):
app = self._get_app()
# View each page three times, from three different IPs.
for ip in ('111.111.11.111', '222.222.22.222', '333.333.33.333'):
# Visit the front page.
self._post_to_tracking(app, url='', type_='page', ip=ip)
# Visit the /organization page.
self._post_to_tracking(app, url='/organization', type_='page',
ip=ip)
# Visit the /about page.
self._post_to_tracking(app, url='/about', type_='page', ip=ip)
self._update_tracking_summary()
        # There's no way to export page-view (as opposed to resource or
        # dataset) tracking summaries, e.g. via the api or a paster command, so
        # the only way we can check them is through the model directly.
import ckan.model as model
for url in ('', '/organization', '/about'):
q = model.Session.query(model.TrackingSummary)
q = q.filter_by(url=url)
tracking_summary = q.one()
assert tracking_summary.count == 3, (
"A page that has been viewed three times should have view "
"count 3")
            # For pages (as opposed to datasets and resources) recent_views and
            # running_total always stay at 0. Shrug.
assert tracking_summary.recent_views == 0, ("recent_views for "
"pages is always 0")
assert tracking_summary.running_total == 0, ("running_total for "
"pages is always 0")
def test_recent_views_expire(self):
# TODO
# Test that package, resource and page views (maybe 3 different tests)
# older than 14 days are counted as total views but not as recent
# views.
# Will probably have to access the model directly to insert tracking
# data older than 14 days.
pass
def test_dataset_view_count_throttling(self):
'''If the same user visits the same dataset multiple times on the same
day, only one view should get counted.
'''
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
self._create_resource(app, package, apikey)
url = routes.url_for(controller='package', action='read',
id=package['name'])
# Visit the dataset three times from the same IP.
self._post_to_tracking(app, url)
self._post_to_tracking(app, url)
self._post_to_tracking(app, url)
self._update_tracking_summary()
package = tests.call_action_api(app, 'package_show', id=package['id'],
include_tracking=True)
tracking_summary = package['tracking_summary']
assert tracking_summary['recent'] == 1, ("Repeat dataset views should "
"not add to recent views "
"count")
assert tracking_summary['total'] == 1, ("Repeat dataset views should "
"not add to total views count")
def test_resource_download_count_throttling(self):
'''If the same user downloads the same resource multiple times on the
same day, only one view should get counted.
'''
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
package = self._create_package(app, apikey)
resource = self._create_resource(app, package, apikey)
# Download the resource three times from the same IP.
self._post_to_tracking(app, resource['url'], type_='resource')
self._post_to_tracking(app, resource['url'], type_='resource')
self._post_to_tracking(app, resource['url'], type_='resource')
self._update_tracking_summary()
resource = tests.call_action_api(app, 'resource_show',
id=resource['id'],
include_tracking=True)
tracking_summary = resource['tracking_summary']
assert tracking_summary['recent'] == 1, (
"Repeat resource downloads should not add to recent views count")
assert tracking_summary['total'] == 1, (
"Repeat resource downloads should not add to total views count")
def test_sorting_datasets_by_recent_views(self):
# FIXME: Have some datasets with different numbers of recent and total
# views, to make this a better test.
import ckan.lib.search
tests.setup_test_search_index()
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
self._create_package(app, apikey, name='consider_phlebas')
self._create_package(app, apikey, name='the_player_of_games')
self._create_package(app, apikey, name='use_of_weapons')
url = routes.url_for(controller='package', action='read',
id='consider_phlebas')
self._post_to_tracking(app, url)
url = routes.url_for(controller='package', action='read',
id='the_player_of_games')
self._post_to_tracking(app, url, ip='111.11.111.111')
self._post_to_tracking(app, url, ip='222.22.222.222')
url = routes.url_for(controller='package', action='read',
id='use_of_weapons')
self._post_to_tracking(app, url, ip='111.11.111.111')
self._post_to_tracking(app, url, ip='222.22.222.222')
self._post_to_tracking(app, url, ip='333.33.333.333')
self._update_tracking_summary()
ckan.lib.search.rebuild()
response = tests.call_action_api(app, 'package_search',
sort='views_recent desc')
assert response['count'] == 3
assert response['sort'] == 'views_recent desc'
packages = response['results']
assert packages[0]['name'] == 'use_of_weapons'
assert packages[1]['name'] == 'the_player_of_games'
assert packages[2]['name'] == 'consider_phlebas'
def test_sorting_datasets_by_total_views(self):
# FIXME: Have some datasets with different numbers of recent and total
# views, to make this a better test.
import ckan.lib.search
tests.setup_test_search_index()
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
self._create_package(app, apikey, name='consider_phlebas')
self._create_package(app, apikey, name='the_player_of_games')
self._create_package(app, apikey, name='use_of_weapons')
url = routes.url_for(controller='package', action='read',
id='consider_phlebas')
self._post_to_tracking(app, url)
url = routes.url_for(controller='package', action='read',
id='the_player_of_games')
self._post_to_tracking(app, url, ip='111.11.111.111')
self._post_to_tracking(app, url, ip='222.22.222.222')
url = routes.url_for(controller='package', action='read',
id='use_of_weapons')
self._post_to_tracking(app, url, ip='111.11.111.111')
self._post_to_tracking(app, url, ip='222.22.222.222')
self._post_to_tracking(app, url, ip='333.33.333.333')
self._update_tracking_summary()
ckan.lib.search.rebuild()
response = tests.call_action_api(app, 'package_search',
sort='views_total desc')
assert response['count'] == 3
assert response['sort'] == 'views_total desc'
packages = response['results']
assert packages[0]['name'] == 'use_of_weapons'
assert packages[1]['name'] == 'the_player_of_games'
assert packages[2]['name'] == 'consider_phlebas'
def test_popular_package(self):
# TODO
# Test that a package with > 10 views is marked as 'popular'.
# Currently the popular logic is in the templates, will have to move
# that into the logic and add 'popular': True/False to package dicts
# to make this testable.
# Also test that a package with < 10 views is not marked as popular.
# Test what kind of views count towards popularity, recent or total,
# and which don't.
pass
def test_popular_resource(self):
# TODO
# Test that a resource with > 10 views is marked as 'popular'.
# Currently the popular logic is in the templates, will have to move
# that into the logic and add 'popular': True/False to resource dicts
# to make this testable.
# Also test that a resource with < 10 views is not marked as popular.
# Test what kind of views count towards popularity, recent or total,
# and which don't.
pass
def test_same_user_visiting_different_pages_on_same_day(self):
# TODO
# Test that if the same user visits multiple pages on the same day,
# each visit gets counted (this should not get throttled)
# (May need to test for packages, resources and pages separately)
pass
def test_same_user_visiting_same_page_on_different_days(self):
# TODO
# Test that if the same user visits the same page on different days,
# each visit gets counted (this should not get throttled)
# (May need to test for packages, resources and pages separately)
# (Probably need to access the model directly to insert old visits
# into tracking_raw)
pass
def test_posting_bad_data_to_tracking(self):
# TODO: Test how /_tracking handles unexpected and invalid data.
pass
def _export_tracking_summary(self):
'''Export CKAN's tracking data and return it.
This simulates calling `paster tracking export` on the command line.
'''
# FIXME: Can this be done as more of a functional test where we
# actually test calling the command and passing the args? By calling
# the method directly, we're not testing the command-line parsing.
import ckan.lib.cli
import ckan.model
f = tempfile.NamedTemporaryFile()
ckan.lib.cli.Tracking('Tracking').export_tracking(
engine=ckan.model.meta.engine, output_filename=f.name)
lines = [line for line in csv.DictReader(open(f.name, 'r'))]
return lines
def test_export(self):
'''`paster tracking export` should export tracking data for all
datasets in CSV format.
Only dataset tracking data is output to CSV file, not resource or page
views.
'''
app = self._get_app()
sysadmin_user, apikey = self._create_sysadmin(app)
# Create a couple of packages.
package_1 = self._create_package(app, apikey)
package_2 = self._create_package(app, apikey, name='another_package')
# View the package_1 three times from different IPs.
url = routes.url_for(controller='package', action='read',
id=package_1['name'])
self._post_to_tracking(app, url, ip='111.222.333.44')
self._post_to_tracking(app, url, ip='111.222.333.55')
self._post_to_tracking(app, url, ip='111.222.333.66')
# View the package_2 twice from different IPs.
url = routes.url_for(controller='package', action='read',
id=package_2['name'])
self._post_to_tracking(app, url, ip='111.222.333.44')
self._post_to_tracking(app, url, ip='111.222.333.55')
self._update_tracking_summary()
lines = self._export_tracking_summary()
assert len(lines) == 2
package_1_data = lines[0]
assert package_1_data['total views'] == '3'
assert package_1_data['recent views (last 2 weeks)'] == '3'
package_2_data = lines[1]
assert package_2_data['total views'] == '2'
assert package_2_data['recent views (last 2 weeks)'] == '2'
def test_tracking_urls_with_languages(self):
# TODO
# Test that posting to eg /de/dataset/foo is counted the same as
# /dataset/foo.
# May need to test for dataset pages, resource previews, resource
# downloads, and other page views separately.
pass
def test_templates_tracking_enabled(self):
# TODO
# Test that the page view tracking JS is in the templates when
# ckan.tracking_enabled = true.
# Test that the sort by popularity option is shown on the datasets page.
pass
def test_templates_tracking_disabled(self):
# TODO
# Test that the page view tracking JS is not in the templates when
# ckan.tracking_enabled = false.
# Test that the sort by popularity option is not on the datasets page.
pass
def test_tracking_disabled(self):
# TODO
# Just to make sure, set ckan.tracking_enabled = false and then post
# a bunch of stuff to /_tracking and test that no tracking data is
# recorded. Maybe /_tracking should return something other than 200,
# as well.
# Could also test that 'tracking_summary' is _not_ in package and
# resource dicts from api when tracking is disabled.
pass
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckan/tests/legacy/functional/test_tracking.py
|
Python
|
gpl-3.0
| 30,370
|
[
"VisIt"
] |
abc9f93f5eabfa0bbd8bd685b6057f891973e681689e8ae6416e105a9726c798
|
#!/usr/bin/env python3
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public License, version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""cclib: parsers and algorithms for computational chemistry
cclib is a Python library that provides parsers for computational
chemistry log files. It also provides a platform to implement
algorithms in a package-independent manner.
"""
doclines = __doc__.split("\n")
# Chosen from http://www.python.org/pypi?:action=list_classifiers
classifiers = """Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
Topic :: Scientific/Engineering :: Chemistry
Topic :: Software Development :: Libraries :: Python Modules"""
programs = ['ADF', 'GAMESS', 'GAMESS-UK', 'Gaussian', 'Jaguar', 'Molpro', 'NWChem', 'ORCA', 'Psi', 'QChem']
def setup_cclib():
import os
import sys
# Import from setuptools only if requested.
if 'egg' in sys.argv:
sys.argv.pop(sys.argv.index('egg'))
from setuptools import setup
else:
from distutils.core import setup
# The list of packages to be installed.
cclib_packages = ['cclib', 'cclib.parser', 'cclib.progress', 'cclib.method', 'cclib.bridge']
setup(
name = "cclib",
version = "1.3",
url = "http://cclib.github.io/",
author = "cclib development team",
author_email = "cclib-users@lists.sourceforge.net",
maintainer = "cclib development team",
maintainer_email = "cclib-users@lists.sourceforge.net",
license = "LGPL",
description = doclines[0],
long_description = "\n".join(doclines[2:]),
classifiers = classifiers.split("\n"),
platforms = ["Any."],
packages = cclib_packages,
package_dir = { '' : 'src' },
scripts = ["src/scripts/ccget", "src/scripts/cda"],
)
if __name__ == '__main__':
setup_cclib()
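# Illustrative invocations (assumptions about the local environment, not part of
# the original file): the default distutils path, and the setuptools path that is
# selected when the extra 'egg' token is popped from sys.argv by setup_cclib().
#
#   python setup.py build
#   python setup.py install
#   python setup.py egg bdist_egg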
|
Clyde-fare/cclib
|
setup.py
|
Python
|
lgpl-2.1
| 2,481
|
[
"ADF",
"GAMESS",
"Gaussian",
"Jaguar",
"Molpro",
"NWChem",
"ORCA",
"cclib"
] |
cdc093e584dcf2d2e75a33de875902e0e9295c426cd66a8537cff0380d0e745f
|
import unittest
import numpy as np
from pyscf.pbc import gto
from pyscf.pbc import scf,cc
from pyscf import cc as mol_cc
from pyscf.pbc.tools.pbc import super_cell
a0 = 4
vac = 200
bas = [[ 3.0/2.0*a0,np.sqrt(3.0)/2.0*a0, 0],
[-3.0/2.0*a0,np.sqrt(3.0)/2.0*a0, 0],
[ 0, 0,vac]]
pos = [['H',(-a0/2.0,0,0)],
['H',( a0/2.0,0,0)]]
cell = gto.M(unit='B',a=bas,atom=pos,basis='cc-pvdz',verbose=4)
nmp = [2,2,1]
nk = np.prod(nmp)
nao = cell.nao_nr()
#primitive cell with k points
kpts = cell.make_kpts(nmp)
nkpts = len(kpts)
kmf = scf.KUHF(cell,kpts,exxdiv=None).density_fit()
kmf.chkfile = 'kpt.chk'
nao_half = nao//2
dmk = np.zeros([2, nkpts, nao, nao])
for i in range(nkpts):
for j in range(2):
dmk[0][i][j,j] = 0.5
dmk[1][i][j+nao_half, j+nao_half] = 0.5
ehf = kmf.kernel(dmk)
kcc = cc.KUCCSD(kmf)
ecc,t1,t2 = kcc.kernel()
print('========================================')
print('UHF energy (kpts) %f \n' % (float(ehf)))
print('UCCSD correlation energy (kpts) %f \n' % (float(ecc)))
print('========================================')
# Gamma point supercell calculation
supcell = super_cell(cell,nmp)
dms = np.zeros([2, supcell.nao_nr(), supcell.nao_nr()])
for i in range(nkpts):
for j in range(2):
dms[0][j+i*nao][j+i*nao] = 0.5
dms[1][j+i*nao+nao_half][j+i*nao+nao_half] = 0.5
gmf = scf.UHF(supcell,exxdiv=None).density_fit()
gmf.chkfile = 'supcell.chk'
ehf = gmf.kernel(dms)
gcc = cc.UCCSD(gmf)
ecc,t1,t2 = gcc.kernel()
print('========================================')
print('UHF energy (supercell) %f' % (float(ehf)/nk))
print('UCCSD correlation energy (supercell) %f' % (float(ecc)/nk))
print('========================================')
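# Illustrative note (an assumption about this setup, not asserted by the script):
# a 2x2x1 k-point mesh on the primitive cell samples the same Bloch states as a
# Gamma-point calculation on the 2x2x1 supercell, so up to SCF/CCSD convergence
# noise the printed per-cell values should agree,
#
#   E_HF(supercell) / nk   ~= E_HF(k-mesh)
#   E_CCSD(supercell) / nk ~= E_CCSD(k-mesh)
#
# which is why the supercell print block above divides by nk.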
|
gkc1000/pyscf
|
pyscf/pbc/cc/test/test_h_2x2x1.py
|
Python
|
apache-2.0
| 1,747
|
[
"PySCF"
] |
f8429df6f89e0711200fc28765d09070664af2ca1788f212582ba297286ace7b
|
"""Bartlett test for common dispersion."""
import numpy as np
from scipy import stats
def bartlett_test(models):
"""
Bartlett test for common dispersion.
Performs a Bartlett test for common dispersion between the models. The
idea for age-period-cohort models is described in Harnau (2018). The test
checks whether we can reject that the dispersion is common across models.
Parameters
----------
models : list
List of fitted apc.Models.
Returns
-------
test_results : dict
Dictionary with keys `B`, `LR`, `m` and `p_value`.
See Also
--------
Vignette in apc/vignettes/vignette_misspecification.ipynb.
Notes
-----
For interpretation, a small p-value speaks against the hypothesis that the
dispersion is equal across models.
Tests are valid for gaussian models (Bartlett 1937), log-normal and
over-dispersed Poisson (Harnau 2018) and generalized log-normal models
(Kuang and Nielsen 2018).
References
----------
- Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests.
Proceedings of the Royal Society A: Mathematical, Physical and Engineering
Sciences, 160(901), 268–282.
- Harnau, J. (2018). Misspecification Tests for Log-Normal and
Over-Dispersed Poisson Chain-Ladder Models. Risks, 6(2), 25. Open Access:
https://doi.org/10.3390/RISKS6020025
- Kuang, D., & Nielsen, B. (2018). Generalized Log-Normal Chain-Ladder.
ArXiv E-Prints, 1806.05939. Download from http://arxiv.org/abs/1806.05939
Examples
--------
>>> model = apc.Model()
>>> model.data_from_df(apc.loss_VNJ())
>>> model.fit('log_normal_response', 'AC')
>>> sub_models = [model.sub_model(coh_from_to=(1,5)),
... model.sub_model(coh_from_to=(6,10))]
>>> apc.bartlett_test(sub_models)
"""
# Check if all models have the same family
families = [model.family for model in models]
if families.count(families[0]) != len(families):
raise ValueError('Model families must match ' +
'across models.')
s2 = np.array([model.s2 for model in models])
df = np.array([model.df_resid for model in models])
df_sum = np.sum(df)
s_bar = s2.dot(df)/df_sum
m = len(models)
LR = df_sum * np.log(s_bar) - df.dot(np.log(s2))
C = 1 + 1/(3*(m-1)) * (np.sum(1/df) - 1/df_sum)
p_value = stats.distributions.chi2.sf(LR/C, df=m-1)
test_results = {'B': LR/C, 'LR': LR, 'C': C, 'm': m, 'p_value': p_value}
return test_results
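# A minimal sketch of calling the test without fitted apc models (an illustration,
# not part of the library): the statistic only reads `family`, `s2` and `df_resid`
# from each model, so duck-typed stand-ins with made-up numbers are enough to see
# the shape of the output.
if __name__ == '__main__':
    class _StubModel(object):
        def __init__(self, s2, df_resid, family='log_normal_response'):
            self.s2 = s2
            self.df_resid = df_resid
            self.family = family

    out = bartlett_test([_StubModel(1.1, 20), _StubModel(0.9, 25), _StubModel(1.3, 18)])
    # A large p-value means no evidence against a common dispersion for the stubs.
    print(out['B'], out['p_value'])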
|
JonasHarnau/apc
|
apc/bartlett_test.py
|
Python
|
gpl-3.0
| 2,562
|
[
"Gaussian"
] |
36d0a63aa36d0fdaf6aa2d2be6196000d3d6c729ba1e6cfa419c26f969fa6ff6
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Overlap computation
######################
Utilities for computing the overlap between gaussian type functions.
"""
import numpy as np
from numba import jit, prange
from .numerical import fac, fac2, dfac21, sdist, choose
from .car2sph import car2sph_scaled
from exatomic.base import nbche
#################################
# Primitive cartesian integrals #
#################################
@jit(nopython=True, nogil=True, cache=nbche)
def _fj(j, l, m, a, b):
"""From Handbook of Computational Quantum Chemistry by David B. Cook
in chapter 7.7.1 -- Essentially a FOILing of the pre-exponential
cartesian power dependence in one dimension."""
tot, i, f = 0., max(0, j - m), min(j, l) + 1
for k in prange(i, f):
tot += (choose(l, k) *
choose(m, int(j - k)) *
a ** (l - k) *
b ** (m + k - j))
return tot
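# Put differently (a restatement for orientation, not quoted from the cited
# handbook): _fj(j, l, m, a, b) is the coefficient of x**j in the polynomial
# (x + a)**l * (x + b)**m, which is exactly what the double-binomial sum above
# accumulates term by term.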
@jit(nopython=True,nogil=True, cache=nbche)
def _nin(l, m, pa, pb, p, N):
"""From Handbook of Computational Quantum Chemistry by David B. Cook
in chapter 7.7.1 -- Sums the result of _fj over the total angular momentum
in one dimension."""
ltot = l + m
if not ltot: return N
tot = 0.
for j in prange(int(ltot // 2 + 1)):
tot += (_fj(2 * j, l, m, pa, pb) *
dfac21(j) / (2 * p) ** j)
return tot * N
@jit(nopython=True, nogil=True, cache=nbche)
def _gaussian_product(a, b, ax, ay, az, bx, by, bz):
"""
From Molecular Electronic-Structure Theory by Trygve Helgaker et al.
Computes a product gaussian following section 9.2.3; see equations
9.2.10 through 9.2.15.
"""
p = a + b
mu = a * b / p
px = (a * ax + b * bx) / p
py = (a * ay + b * by) / p
pz = (a * az + b * bz) / p
ab2 = sdist(ax, ay, az, bx, by, bz)
return (np.sqrt(np.pi / p), p, mu, ab2,
px - ax, py - ay, pz - az,
px - bx, py - by, pz - bz)
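# For reference (a hedged paraphrase of the relation being used, not quoted from
# the cited text): two primitives centred on A and B combine into one gaussian on P,
#
#     exp(-a*|r - A|**2) * exp(-b*|r - B|**2)
#         = exp(-mu*|A - B|**2) * exp(-p*|r - P|**2)
#
# with p = a + b, mu = a*b/p and P = (a*A + b*B)/p; the tuple returned above carries
# sqrt(pi/p), p, mu, |A - B|**2 and the per-axis components of P - A and P - B.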
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_overlap_product(l1, m1, n1, l2, m2, n2,
N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz):
"""Compute primitive cartesian overlap integral in terms of a gaussian product."""
return (np.exp(-mu * ab2) * _nin(l1, l2, pax, pbx, p, N)
* _nin(m1, m2, pay, pby, p, N)
* _nin(n1, n2, paz, pbz, p, N))
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_overlap(a1, a2, ax, ay, az, bx, by, bz, l1, m1, n1, l2, m2, n2):
"""Compute a primitive cartesian overlap integral."""
#N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz = \
p = _gaussian_product(a1, a2, ax, ay, az, bx, by, bz)
return _primitive_overlap_product(l1, m1, n1, l2, m2, n2, *p)
@jit(nopython=True, nogil=True, cache=nbche)
def _primitive_kinetic(a1, a2, ax, ay, az, bx, by, bz, l1, m1, n1, l2, m2, n2):
"""Compute the kinetic energy as a linear combination of overlap terms."""
#N, p, mu, ab2, pax, pay, paz, pbx, pby, pbz = \
p = _gaussian_product(a1, a2, ax, ay, az, bx, by, bz)
# T = (1/2)<grad(a)|grad(b)>: per axis, 4*a1*a2*S(l+1,l'+1) + l*l'*S(l-1,l'-1)
# - 2*a2*l*S(l-1,l'+1) - 2*a1*l'*S(l+1,l'-1), each times the other two 1D factors.
t = 4 * a1 * a2 * _primitive_overlap_product(l1 + 1, m1, n1, l2 + 1, m2, n2, *p)
t += 4 * a1 * a2 * _primitive_overlap_product(l1, m1 + 1, n1, l2, m2 + 1, n2, *p)
t += 4 * a1 * a2 * _primitive_overlap_product(l1, m1, n1 + 1, l2, m2, n2 + 1, *p)
if l1 and l2:
t += l1 * l2 * _primitive_overlap_product(l1 - 1, m1, n1, l2 - 1, m2, n2, *p)
if m1 and m2:
t += m1 * m2 * _primitive_overlap_product(l1, m1 - 1, n1, l2, m2 - 1, n2, *p)
if n1 and n2:
t += n1 * n2 * _primitive_overlap_product(l1, m1, n1 - 1, l2, m2, n2 - 1, *p)
if l1: t -= 2 * a2 * l1 * _primitive_overlap_product(l1 - 1, m1, n1, l2 + 1, m2, n2, *p)
if l2: t -= 2 * a1 * l2 * _primitive_overlap_product(l1 + 1, m1, n1, l2 - 1, m2, n2, *p)
if m1: t -= 2 * a2 * m1 * _primitive_overlap_product(l1, m1 - 1, n1, l2, m2 + 1, n2, *p)
if m2: t -= 2 * a1 * m2 * _primitive_overlap_product(l1, m1 + 1, n1, l2, m2 - 1, n2, *p)
if n1: t -= 2 * a2 * n1 * _primitive_overlap_product(l1, m1, n1 - 1, l2, m2, n2 + 1, *p)
if n2: t -= 2 * a1 * n2 * _primitive_overlap_product(l1, m1, n1 + 1, l2, m2, n2 - 1, *p)
return t / 2
######################################
# Generators over shells/shell-pairs #
######################################
@jit(nopython=True, nogil=True, cache=False)
def _iter_atom_shells(ptrs, xyzs, *shls):
"""Generator yielding indices, atomic coordinates and basis set shells."""
nshl = len(ptrs)
for i in range(nshl):
pa, pi = ptrs[i]
yield (i, xyzs[pa][0], xyzs[pa][1], xyzs[pa][2], shls[pi])
@jit(nopython=True, nogil=True, cache=False)
def _iter_atom_shell_pairs(ptrs, xyzs, *shls):
"""Generator yielding indices, atomic coordinates and basis set
shells in block-pair order."""
nshl = len(ptrs)
for i in range(nshl):
for j in range(i + 1):
pa, pi = ptrs[i]
pb, pj = ptrs[j]
yield (i, j, xyzs[pa][0], xyzs[pa][1], xyzs[pa][2],
xyzs[pb][0], xyzs[pb][1], xyzs[pb][2],
shls[pi], shls[pj])
############################################
# Integral processing for Shell objects    #
############################################
@jit(nopython=True, nogil=True, cache=nbche)
def _cartesian_overlap_shell(xa, ya, za, xb, yb, zb,
li, mi, ni, lj, mj, nj,
ialpha, jalpha):
"""Compute pairwise cartesian integrals exponents in a block-pair."""
pints = np.empty((len(ialpha), len(jalpha)))
for i, ia in enumerate(ialpha):
for j, ja in enumerate(jalpha):
pints[i, j] = _primitive_overlap(ia, ja,
xa, ya, za, xb, yb, zb,
li, mi, ni, lj, mj, nj)
return pints
@jit(nopython=True, nogil=True, cache=nbche)
def _cartesian_shell_pair(ax, ay, az, bx, by, bz, ishl, jshl):
"""Compute fully contracted block-pair integrals including
expansion of angular momentum dependence."""
inrm = ishl.norm_contract()
jnrm = jshl.norm_contract()
ideg = (ishl.L + 1) * (ishl.L + 2) // 2
jdeg = (jshl.L + 1) * (jshl.L + 2) // 2
pint = np.empty((ideg * ishl.nprim, jdeg * jshl.nprim))
for magi, (li, mi, ni) in enumerate(ishl.enum_cartesian()):
for magj, (lj, mj, nj) in enumerate(jshl.enum_cartesian()):
ianc = magi * ishl.nprim
janc = magj * jshl.nprim
pint[ianc : ianc + ishl.nprim,
janc : janc + jshl.nprim] = \
_cartesian_overlap_shell(ax, ay, az, bx, by, bz,
li, mi, ni, lj, mj, nj,
ishl.alphas, jshl.alphas)
if ishl.L:
inrm = np.kron(np.eye(ideg), inrm)
if ishl.spherical:
inrm = np.dot(inrm, np.kron(car2sph_scaled(ishl.L),
np.eye(ishl.ncont)))
if jshl.L:
jnrm = np.kron(np.eye(jdeg), jnrm)
if jshl.spherical:
jnrm = np.dot(jnrm, np.kron(car2sph_scaled(jshl.L),
np.eye(jshl.ncont)))
return np.dot(inrm.T, np.dot(pint, jnrm))
@jit(nopython=True, nogil=True, cache=False)
def _cartesian_shell_pairs(ndim, ptrs, xyzs, *shls):
"""Construct a full square (overlap) integral matrix."""
cart = np.zeros((ndim, ndim))
ii = 0
for i, j, ax, ay, az, bx, by, bz, ishl, jshl \
in _iter_atom_shell_pairs(ptrs, xyzs, *shls):
if not j: jj = 0
cint = _cartesian_shell_pair(ax, ay, az, bx, by, bz, ishl, jshl)
iblk, jblk = cint.shape
cart[ii : ii + iblk, jj : jj + jblk] = cint
if i != j: cart[jj : jj + jblk, ii : ii + iblk] = cint.T
else: ii += iblk
jj += jblk
return cart
##################################
# Obara-Saika recursion relation #
##################################
@jit(nopython=True, nogil=True, cache=nbche)
def _obara_s_recurr(p, l, m, pa, pb, s):
"""There is a bug in this function. Do not use."""
if not l + m: return s
p2 = 1 / (2 * p)
s0 = np.zeros((l + 1, m + 1))
s0[0, 0] = s
if l: s0[1, 0] = pa * s
if m: s0[0, 1] = pb * s
if l and m: s0[1, 1] = pb * s0[1, 0] + p2 * s
for i in range(1, l):
for j in range(1, m):
mul = p2 * (i * s0[i - 1, j] + j * s0[i, j - 1])
s0[i + 1, j] = pa * s0[i, j] + mul
s0[i, j + 1] = pb * s0[i, j] + mul
s0[i + 1, j + 1] = pa * s0[i, j + 1] + p2 * ((i + 1) * s0[i, j] + j * s0[i + 1, j])
return s0[l, m]
@jit(nopython=True, nogil=True, cache=nbche)
def _nin(o1, o2, po1, po2, gamma, pg12):
"""Helper function for gaussian overlap between 2 centers."""
otot = o1 + o2
if not otot: return pg12
if otot % 2: otot -= 1
oio = 0.
for i in range(otot // 2 + 1):
k = 2 * i
prod = pg12 * fac2(k - 1) / ((2 * gamma) ** i)
qlo = max(-k, (k - 2 * o2))
qhi = min( k, (2 * o1 - k)) + 1
fk = 0.
for q in range(qlo, qhi, 2):
xx = (k + q) // 2
zz = (k - q) // 2
newt1 = fac(o1) / fac(xx) / fac(o1 - xx)
newt2 = fac(o2) / fac(zz) / fac(o2 - zz)
fk += newt1 * newt2 * (po1 ** (o1 - xx)) * (po2 ** (o2 - zz))
oio += prod * fk
return oio
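# Quick numerical sanity check (illustrative only; guarded so it never runs on
# import): for two s primitives the machinery above should reduce to the closed
# form (pi/p)**1.5 * exp(-mu*|A - B|**2).
if __name__ == '__main__':
    a1, a2 = 0.8, 1.4
    ax, ay, az, bx, by, bz = 0.0, 0.0, 0.0, 0.7, -0.2, 0.3
    s = _primitive_overlap(a1, a2, ax, ay, az, bx, by, bz, 0, 0, 0, 0, 0, 0)
    p, mu = a1 + a2, a1 * a2 / (a1 + a2)
    ab2 = (ax - bx) ** 2 + (ay - by) ** 2 + (az - bz) ** 2
    assert abs(s - (np.pi / p) ** 1.5 * np.exp(-mu * ab2)) < 1e-10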
|
avmarchenko/exatomic
|
exatomic/algorithms/overlap.py
|
Python
|
apache-2.0
| 9,560
|
[
"Gaussian"
] |
b858ee1039a5369b612a3c74f830817e11c9a978d25a23d0ce7f87cbb1f4c99f
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ExprVisitor."""
import ast
import subprocess
import textwrap
import unittest
from grumpy.compiler import block
from grumpy.compiler import expr_visitor
from grumpy.compiler import shard_test
from grumpy.compiler import util
def _MakeExprTest(expr):
def Test(self):
code = 'assert ({}) == ({!r}), {!r}'.format(expr, eval(expr), expr) # pylint: disable=eval-used
self.assertEqual((0, ''), _GrumpRun(code))
return Test
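# For instance (illustration only), _MakeExprTest('1 + 2') produces a test whose
# generated program is effectively
#     assert (1 + 2) == (3), '1 + 2'
# so the transpiled Grumpy result is checked against CPython's own evaluation of
# the same expression string.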
def _MakeLiteralTest(lit):
def Test(self):
status, output = _GrumpRun('print repr({!r}),'.format(lit))
self.assertEqual(0, status, output)
self.assertEqual(lit, eval(output)) # pylint: disable=eval-used
return Test
def _MakeSliceTest(subscript, want):
"""Define a test function that evaluates a slice expression."""
def Test(self):
code = textwrap.dedent("""\
class Slicer(object):
def __getitem__(self, slice):
print slice
Slicer()[{}]""")
status, output = _GrumpRun(code.format(subscript))
self.assertEqual(0, status, output)
self.assertEqual(want, output.strip())
return Test
class ExprVisitorTest(unittest.TestCase):
# pylint: disable=invalid-name
def testAttribute(self):
code = textwrap.dedent("""\
class Foo(object):
bar = 42
assert Foo.bar == 42""")
self.assertEqual((0, ''), _GrumpRun(code))
testBinOpArithmeticAdd = _MakeExprTest('1 + 2')
testBinOpArithmeticAnd = _MakeExprTest('7 & 12')
testBinOpArithmeticDiv = _MakeExprTest('8 / 4')
testBinOpArithmeticFloorDiv = _MakeExprTest('8 // 4')
testBinOpArithmeticFloorDivRemainder = _MakeExprTest('5 // 2')
testBinOpArithmeticMod = _MakeExprTest('9 % 5')
testBinOpArithmeticMul = _MakeExprTest('3 * 2')
testBinOpArithmeticOr = _MakeExprTest('2 | 6')
testBinOpArithmeticSub = _MakeExprTest('10 - 3')
testBinOpArithmeticXor = _MakeExprTest('3 ^ 5')
def testBinOpNotImplemented(self):
self.assertRaisesRegexp(util.ParseError, 'binary op not implemented',
_ParseAndVisitExpr, 'x ** y')
testBoolOpTrueAndFalse = _MakeExprTest('True and False')
testBoolOpTrueAndTrue = _MakeExprTest('True and True')
testBoolOpTrueAndExpr = _MakeExprTest('True and 2 == 2')
testBoolOpTrueOrFalse = _MakeExprTest('True or False')
testBoolOpFalseOrFalse = _MakeExprTest('False or False')
testBoolOpFalseOrExpr = _MakeExprTest('False or 2 == 2')
def testCall(self):
code = textwrap.dedent("""\
def foo():
print 'bar'
foo()""")
self.assertEqual((0, 'bar\n'), _GrumpRun(code))
def testCallKeywords(self):
code = textwrap.dedent("""\
def foo(a=1, b=2):
print a, b
foo(b=3)""")
self.assertEqual((0, '1 3\n'), _GrumpRun(code))
def testCallVarArgs(self):
code = textwrap.dedent("""\
def foo(a, b):
print a, b
foo(*(123, 'abc'))""")
self.assertEqual((0, '123 abc\n'), _GrumpRun(code))
def testCallKwargs(self):
code = textwrap.dedent("""\
def foo(a, b=2):
print a, b
foo(**{'a': 4})""")
self.assertEqual((0, '4 2\n'), _GrumpRun(code))
testCompareLT = _MakeExprTest('1 < 2')
testCompareLE = _MakeExprTest('7 <= 12')
testCompareEq = _MakeExprTest('8 == 4')
testCompareNE = _MakeExprTest('9 != 5')
testCompareGE = _MakeExprTest('3 >= 2')
testCompareGT = _MakeExprTest('2 > 6')
testCompareLTLT = _MakeExprTest('3 < 6 < 9')
testCompareLTEq = _MakeExprTest('3 < 6 == 9')
testCompareLTGE = _MakeExprTest('3 < 6 >= -2')
testCompareGTEq = _MakeExprTest('88 > 12 == 12')
testCompareInStr = _MakeExprTest('"1" in "abc"')
testCompareInTuple = _MakeExprTest('1 in (1, 2, 3)')
testCompareNotInTuple = _MakeExprTest('10 < 12 not in (1, 2, 3)')
testDictEmpty = _MakeLiteralTest({})
testDictNonEmpty = _MakeLiteralTest({'foo': 42, 'bar': 43})
testDictCompFor = _MakeExprTest('{x: str(x) for x in range(3)}')
testDictCompForIf = _MakeExprTest(
'{x: 3 * x for x in range(10) if x % 3 == 0}')
testDictCompForFor = _MakeExprTest(
'{x: y for x in range(3) for y in range(x)}')
testGeneratorExpFor = _MakeExprTest('tuple(int(x) for x in "123")')
testGeneratorExpForIf = _MakeExprTest(
'tuple(x / 3 for x in range(10) if x % 3)')
testGeneratorExprForFor = _MakeExprTest(
'tuple(x + y for x in range(3) for y in range(x + 2))')
testIfExpr = _MakeExprTest('1 if True else 0')
testIfExprCompound = _MakeExprTest('42 if "ab" == "a" + "b" else 24')
testIfExprNested = _MakeExprTest(
'"foo" if "" else "bar" if 0 else "baz"')
testLambdaNoArgs = _MakeExprTest('(lambda: 123)()')
testLambdaPositionalArgs = _MakeExprTest('(lambda a, b: (a, b))("foo", "bar")')
testLambdaDefaultArg = _MakeExprTest('(lambda a, b=3: (a, b))("foo")')
testLambdaVarArgs = _MakeExprTest('(lambda *args: args)(1, 2, 3)')
testLambdaKwargs = _MakeExprTest('(lambda **kwargs: kwargs)(x="foo", y="bar")')
testListEmpty = _MakeLiteralTest([])
testListNonEmpty = _MakeLiteralTest([1, 2])
testListCompFor = _MakeExprTest('[int(x) for x in "123"]')
testListCompForIf = _MakeExprTest('[x / 3 for x in range(10) if x % 3]')
testListCompForFor = _MakeExprTest(
'[x + y for x in range(3) for y in range(x + 2)]')
def testNameGlobal(self):
code = textwrap.dedent("""\
foo = 123
assert foo == 123""")
self.assertEqual((0, ''), _GrumpRun(code))
def testNameLocal(self):
code = textwrap.dedent("""\
def foo():
bar = 'abc'
assert bar == 'abc'
foo()""")
self.assertEqual((0, ''), _GrumpRun(code))
testNumInt = _MakeLiteralTest(42)
testNumLong = _MakeLiteralTest(42L)
testNumIntLarge = _MakeLiteralTest(12345678901234567890)
testNumFloat = _MakeLiteralTest(102.1)
testNumFloatNoDecimal = _MakeLiteralTest(5.)
testNumFloatOnlyDecimal = _MakeLiteralTest(.5)
testNumFloatSci = _MakeLiteralTest(1e6)
testNumFloatSciCap = _MakeLiteralTest(1E6)
testNumFloatSciCapPlus = _MakeLiteralTest(1E+6)
testNumFloatSciMinus = _MakeLiteralTest(1e-6)
testSubscriptDictStr = _MakeExprTest('{"foo": 42}["foo"]')
testSubscriptListInt = _MakeExprTest('[1, 2, 3][2]')
testSubscriptTupleSliceStart = _MakeExprTest('(1, 2, 3)[2:]')
testSubscriptTupleSliceStartStop = _MakeExprTest('(1, 2, 3)[10:11]')
testSubscriptTupleSliceStartStep = _MakeExprTest('(1, 2, 3, 4, 5, 6)[-2::-2]')
testSubscriptStartStop = _MakeSliceTest('2:3', 'slice(2, 3, None)')
testSubscriptMultiDim = _MakeSliceTest('1,2,3', '(1, 2, 3)')
testSubscriptStartStopObjects = _MakeSliceTest(
'True:False', 'slice(True, False, None)')
testSubscriptMultiDimSlice = _MakeSliceTest(
"'foo','bar':'baz':'qux'", "('foo', slice('bar', 'baz', 'qux'))")
testStrEmpty = _MakeLiteralTest('')
testStrAscii = _MakeLiteralTest('abc')
testStrUtf8 = _MakeLiteralTest('\tfoo\n\xcf\x80')
testStrQuoted = _MakeLiteralTest('"foo"')
testStrUtf16 = _MakeLiteralTest(u'\u0432\u043e\u043b\u043d')
testTupleEmpty = _MakeLiteralTest(())
testTupleNonEmpty = _MakeLiteralTest((1, 2, 3))
testUnaryOpNot = _MakeExprTest('not True')
testUnaryOpInvert = _MakeExprTest('~4')
def testUnaryOpNotImplemented(self):
self.assertRaisesRegexp(util.ParseError, 'unary op not implemented',
_ParseAndVisitExpr, '+foo')
def _MakeModuleBlock():
return block.ModuleBlock('__main__', 'grumpy', 'grumpy/lib', '<test>', [])
def _ParseExpr(expr):
return ast.parse(expr).body[0].value
def _ParseAndVisitExpr(expr):
writer = util.Writer()
visitor = expr_visitor.ExprVisitor(_MakeModuleBlock(), writer)
visitor.visit(_ParseExpr(expr))
return writer.out.getvalue()
def _GrumpRun(cmd):
p = subprocess.Popen(['grumprun'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate(cmd)
return p.returncode, out
if __name__ == '__main__':
shard_test.main()
|
AlexEKoren/grumpy
|
compiler/expr_visitor_test.py
|
Python
|
apache-2.0
| 8,527
|
[
"VisIt"
] |
69b0d2b6b80159e4075627ccfd1dc4a520043bc1443122fb85c1d327d06b8c07
|
from math import sqrt
def is_prime(n):
if n <= 1:
return False
elif n in [2, 3]:
return True
# To understand the statement below, please visit https://github.com/mre/the-coding-interview/pull/33
elif n % 6 not in [1, 5]:
return False
for i in range(3, int(sqrt(n))+1, 2): # change range to xrange for python2
if n % i == 0:
return False
return True
print(is_prime(13))
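# Illustrative cross-check (not part of the original snippet): compare against a
# naive trial-division primality test over a small range.
def _is_prime_naive(n):
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == _is_prime_naive(n) for n in range(500))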
|
mre/the-coding-interview
|
problems/prime-number/prime-number.py
|
Python
|
mit
| 443
|
[
"VisIt"
] |
37171ca739f3990fc13790fb23f8f24a074a87380e055423c1ceaec4658e08dc
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivative by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),11)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),4)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
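        # Checks func against an algebraically equivalent formulation
        # (other_func) on heavy-tailed random real and complex samples,
        # skipping points where the reference evaluation is not finite.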
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
assert_equal(i,0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
        b = 1-a  # a is the current state; 1-a inverts it
        c = special.errprint(b)  # returns the previous state, i.e. 'a'
        assert_equal(a,c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d,b)  # make sure the inverted state was returned
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_equal(special.factorial(5, exact=True), 120)
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
def test_hankl2e(self):
hank2e = special.hankel2e(1,.1)
hankrl2e = special.hankel2e(1,.1)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(np.complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
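            # derivative via the recurrence Jn'(x) = (J_{n-1}(x) - J_{n+1}(x)) / 2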
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
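        # Compare the real-argument (Cephes) implementation f1 against its
        # complex-argument (AMOS) evaluation and, for integer orders, against
        # the integer-order routine f2.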
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
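        # Power series for the modified Bessel function I_v(z); each term is
        # evaluated in log space before exponentiation. Returns the series sum
        # and a rough error estimate.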
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c,[1])
assert_equal(leg1.c,[1,0])
assert_equal(leg2.c,array([3,0,-1])/2.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
        lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # Q is not defined; this is broken and the proper reporting order cannot be determined
def test_mathieu_odd_coef(self):
pass
# same problem as above
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
pbv = special.pbdv(1,.2)
derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
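        # NOTE: derrl is computed but never asserted, and under Python 2 the
        # leading 1/2 is integer division (0), so this check is effectively a
        # no-op.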
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
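        # e.g. under round-half-to-even, round(10.5) -> 10 and round(11.5) -> 12.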
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 # correct derivative value
assert_almost_equal(sy1,-377.52483,5) # previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
        assert_almost_equal(sy3,sphpy,4)  # compare with the correct derivative value (correct = -system val)
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/scipy/special/tests/test_basic.py
|
Python
|
agpl-3.0
| 109,372
|
[
"Elk"
] |
602d90081350127a5aadbaeea0e01cb60bbaaa1bd7d69b1f4f39d76605f25a9d
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from contextlib import contextmanager
from copy import deepcopy
from cStringIO import StringIO
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import transaction
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
from student.models import CourseEnrollment
from instructor.access import allow_access
from instructor.views.api import _split_input_list
from instructor.views.gradebook_api import get_grade_book_page
from instructor.views.tools import get_student_from_identifier
from instructor.enrollment import (
enroll_email,
unenroll_email,
get_email_params,
)
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
class CCXUserValidationException(Exception):
"""
Custom Exception for validation of users in CCX
"""
pass
def coach_dashboard(view):
"""
    View decorator which enforces that the user has the CCX coach role on the
    given course, and translates the course_id from the Django route into a
    course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
course_key = ccx.course_id
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(
_('You must be a CCX Coach to access this view.'))
course = get_course_by_id(course_key, depth=None)
# if there is a ccx, we must validate that it is the ccx for this coach
if ccx is not None:
coach_ccx = get_ccx_for_coach(course, request.user)
if coach_ccx is None or coach_ccx.id != ccx.id:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
    # so, if no ccx is passed in, we can safely redirect to that
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, unicode(ccx.id))
# At this point we are done with verification that current user is ccx coach.
assign_coach_role_to_ccx(ccx_locator, request.user, course.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator, is_active=True)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Enforce a static limit for the maximum amount of students that can be enrolled
override_field_for_ccx(ccx, course, 'max_student_enrollments_allowed', settings.CCX_MAX_STUDENTS_ALLOWED)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, ccx.id)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
# Enroll the coach in the course
email_params = get_email_params(course, auto_enroll=True, course_key=ccx_id, display_name=ccx.display_name)
enroll_email(
course_id=ccx_id,
student_email=request.user.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
assign_coach_role_to_ccx(ccx_id, request.user, course.id)
return redirect(url)
def assign_coach_role_to_ccx(ccx_locator, user, master_course_id):
"""
    If the user has the coach role on the master course, assign them the coach
    role on the CCX, but only if it is not already assigned. Because of this, a
    coach can open the dashboard from the master course as well as from the CCX.
:param ccx_locator: CCX key
:param user: User to whom we want to assign role.
:param master_course_id: Master course key
"""
coach_role_on_master_course = CourseCcxCoachRole(master_course_id)
# check if user has coach role on master course
if coach_role_on_master_course.has_user(user):
# Check if user has coach role on ccx.
role = CourseCcxCoachRole(ccx_locator)
if not role.has_user(user):
# assign user role coach on ccx
with ccx_course(ccx_locator) as course:
allow_access(course, user, "ccx_coach", send_email=False)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
# Only subsection (aka sequential) and unit (aka vertical) have due dates.
            if 'due' in unit: # checking that the key (due) exists in dict (unit).
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
else:
# In case of section aka chapter we do not have due date.
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
# For a vertical, override start and due dates of all its problems.
if unit.get('category', None) == u'vertical':
for component in block.get_children():
# override start and due date of problem (Copy dates of vertical into problems)
if start:
override_field_for_ccx(ccx, component, 'start', start)
if due:
override_field_for_ccx(ccx, component, 'due', due)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
earliest, ccx_ids_to_delete = override_fields(course, json.loads(request.body), graded, [])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section.get('min_count', 0):
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def validate_date(year, month, day, hour, minute):
"""
    Avoid corrupting the DB if bad dates come in.
"""
valid = True
if year < 0:
valid = False
if month < 1 or month > 12:
valid = False
if day < 1 or day > 31:
valid = False
if hour < 0 or hour > 23:
valid = False
if minute < 0 or minute > 59:
valid = False
return valid
def parse_date(datestring):
"""
Generate a UTC datetime.datetime object from a string of the form
'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
"""
if datestring:
date, time = datestring.split(' ')
year, month, day = map(int, date.split('-'))
hour, minute = map(int, time.split(':'))
if validate_date(year, month, day, hour, minute):
return datetime.datetime(
year, month, day, hour, minute, tzinfo=pytz.UTC)
return None
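# Illustrative examples (not part of the original module): parse_date expects
# the exact 'YYYY-MM-DD HH:MM' layout produced by the schedule widgets, e.g.
#   parse_date('2015-09-01 14:30')  -> datetime.datetime(2015, 9, 1, 14, 30, tzinfo=pytz.UTC)
#   parse_date('2015-13-01 14:30')  -> None  (month 13 fails validate_date)
#   parse_date('')                  -> None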
def get_ccx_for_coach(course, coach):
"""
Looks to see if user is coach of a CCX for this course. Returns the CCX or
None.
"""
ccxs = CustomCourseForEdX.objects.filter(
course_id=course.id,
coach=coach
)
# XXX: In the future, it would be nice to support more than one ccx per
# coach per course. This is a place where that might happen.
if ccxs.exists():
return ccxs[0]
return None
def get_date(ccx, node, date_type=None, parent_node=None):
"""
This returns override or master date for section, subsection or a unit.
:param ccx: ccx instance
:param node: chapter, subsection or unit
:param date_type: start or due
:param parent_node: parent of node
:return: start or due date
"""
date = get_override_for_ccx(ccx, node, date_type, None)
if date_type == "start":
master_date = node.start
else:
master_date = node.due
if date is not None:
# Setting override date [start or due]
date = date.strftime('%Y-%m-%d %H:%M')
elif not parent_node and master_date is not None:
# Setting date from master course
date = master_date.strftime('%Y-%m-%d %H:%M')
elif parent_node is not None:
# Set parent date (vertical has same dates as subsections)
date = get_date(ccx, node=parent_node, date_type=date_type)
return date
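# Descriptive note (not part of the original module): get_date resolves dates
# in this order:
#   1. a CCX override stored for the node, if present
#   2. otherwise the master-course date on the node itself (top-level calls)
#   3. otherwise, for a unit, the parent subsection's date via the recursive call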
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to string to get them ready for use by the js date
widgets, which use text inputs.
        Visits student-visible nodes only; children of hidden nodes are
        skipped as well.
Dates:
        Only the start date applies to a section. If the CCX coach did not
        override it, it is taken from the master course.
        Both start and due dates apply to a subsection (aka sequential). If the
        CCX coach did not override them, they are taken from the corresponding
        subsection in the master course.
        A unit inherits its start and due dates from its subsection when the
        CCX coach did not override them.
"""
for child in node.get_children():
# in case the children are visible to staff only, skip them
if child.visible_to_staff_only:
continue
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
start = get_date(ccx, child, 'start')
if depth > 1:
# Subsection has both start and due dates and unit inherit dates from their subsections
if depth == 2:
due = get_date(ccx, child, 'due')
elif depth == 3:
# Get start and due date of subsection in case unit has not override dates.
due = get_date(ccx, child, 'due', node)
start = get_date(ccx, child, 'start', node)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
else:
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
return HttpResponse(json_schedule, content_type='application/json')
def get_valid_student_email(identifier):
"""
    Helper function to get a user email from an identifier and validate it.
    In the UI a Coach can enroll users using either an email or a username.
    This function takes care of:
    - in case the identifier is a username, extracting the user object from
      the DB and then the associated email
- validating the email
Arguments:
identifier (str): Username or email of the user to enroll
Returns:
str: A validated email for the user to enroll
Raises:
CCXUserValidationException: if the username is not found or the email
is not valid.
"""
user = email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
validate_email(email)
except ValidationError:
raise CCXUserValidationException('Could not find a user with name or email "{0}" '.format(identifier))
return email
def _ccx_students_enrrolling_center(action, identifiers, email_students, course_key, email_params):
"""
Function to enroll/add or unenroll/revoke students.
    This function exists for backwards compatibility: CCX has two different
    views for managing students that used to implement different logic. The
    logic has now been reconciled to the point that this function can be
    used by both.
The two different views can be merged after some UI refactoring.
Arguments:
action (str): type of action to perform (add, Enroll, revoke, Unenroll)
identifiers (list): list of students username/email
email_students (bool): Flag to send an email to students
course_key (CCXLocator): a CCX course key
email_params (dict): dictionary of settings for the email to be sent
Returns:
list: list of error
"""
errors = []
if action == 'Enroll' or action == 'add':
ccx_course_overview = CourseOverview.get_from_id(course_key)
for identifier in identifiers:
if CourseEnrollment.objects.is_course_full(ccx_course_overview):
error = ('The course is full: the limit is {0}'.format(
ccx_course_overview.max_student_enrollments_allowed))
log.info("%s", error)
errors.append(error)
break
try:
email = get_valid_student_email(identifier)
except CCXUserValidationException as exp:
log.info("%s", exp)
errors.append("{0}".format(exp))
continue
enroll_email(course_key, email, auto_enroll=True, email_students=email_students, email_params=email_params)
elif action == 'Unenroll' or action == 'revoke':
for identifier in identifiers:
try:
email = get_valid_student_email(identifier)
except CCXUserValidationException as exp:
log.info("%s", exp)
errors.append("{0}".format(exp))
continue
unenroll_email(course_key, email, email_students=email_students, email_params=email_params)
return errors
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
email_students = 'email-students' in request.POST
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
_ccx_students_enrrolling_center(action, identifiers, email_students, course_key, email_params)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""
Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
email_students = 'email-students' in request.POST
identifiers = [student_id]
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll=True, course_key=course_key, display_name=ccx.display_name)
errors = _ccx_students_enrrolling_center(action, identifiers, email_students, course_key, email_params)
for error_message in errors:
messages.error(request, error_message)
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course_key})
return redirect(url)
@contextmanager
def ccx_course(ccx_locator):
"""Create a context in which the course identified by course_locator exists
"""
course = get_course_by_id(ccx_locator)
yield course
def prep_course_for_grading(course, request):
"""Set up course module for overrides to function properly"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
course._field_data_cache = {} # pylint: disable=protected-access
course.set_grading_policy(course.grading_policy)
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
student_info, page = get_grade_book_page(request, course, course_key=ccx_key)
return render_to_response('courseware/gradebook.html', {
'page': page,
'page_url': reverse('ccx_gradebook', kwargs={'course_id': ccx_key}),
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
response = HttpResponse(buf.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment'
return response
|
Emergya/icm-openedx-educamadrid-platform-basic
|
lms/djangoapps/ccx/views.py
|
Python
|
agpl-3.0
| 27,223
|
[
"VisIt"
] |
fdfc7c21b0511cf3b824af84f56d2a992cc4aa67df78f857164fa8bb8a7b6e8d
|
__RCSID__ = "$Id$"
""" FileCatalogFactory class to create file catalog client objects according to the
configuration description
"""
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCatalogPath
from DIRAC.Resources.Catalog.FileCatalogProxyClient import FileCatalogProxyClient
from DIRAC.Core.Utilities import ObjectLoader
class FileCatalogFactory:
def __init__( self ):
self.log = gLogger.getSubLogger( 'FileCatalogFactory' )
def createCatalog( self, catalogName, useProxy = False ):
""" Create a file catalog object from its name and CS description
"""
if useProxy:
catalog = FileCatalogProxyClient( catalogName )
return S_OK( catalog )
# get the CS description first
catalogPath = getCatalogPath( catalogName )
catalogType = gConfig.getValue( catalogPath + '/CatalogType', catalogName )
catalogURL = gConfig.getValue( catalogPath + '/CatalogURL', "DataManagement/" + catalogType )
self.log.debug( 'Creating %s client' % catalogName )
objectLoader = ObjectLoader.ObjectLoader()
result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client' )
if not result['OK']:
gLogger.error( 'Failed to load catalog object: %s' % result['Message'] )
return result
catalogClass = result['Value']
try:
# FIXME: is it really needed? This is the factory, can't this be moved out?
if catalogType in [ 'LcgFileCatalogCombined', 'LcgFileCatalog' ]:
# The LFC special case
infoSys = gConfig.getValue( catalogPath + '/LcgGfalInfosys', '' )
host = gConfig.getValue( catalogPath + '/MasterHost', '' )
catalog = catalogClass( infoSys, host )
else:
if catalogURL:
catalog = catalogClass( url = catalogURL )
else:
catalog = catalogClass()
self.log.debug( 'Loaded module %sClient' % catalogType )
return S_OK( catalog )
except Exception, x:
errStr = "Failed to instantiate %s()" % ( catalogType )
gLogger.exception( errStr, lException = x )
return S_ERROR( errStr )
# Catalog module was not loaded
return S_ERROR( 'No suitable client found for %s' % catalogName )
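# Hedged usage sketch (not part of the original module; assumes a configured
# DIRAC installation with a catalog described in the CS, and the catalog name
# below is illustrative only):
#   factory = FileCatalogFactory()
#   result = factory.createCatalog( 'FileCatalog' )
#   if result['OK']:
#       catalog = result['Value']
#   else:
#       gLogger.error( result['Message'] )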
|
calancha/DIRAC
|
Resources/Catalog/FileCatalogFactory.py
|
Python
|
gpl-3.0
| 2,270
|
[
"DIRAC"
] |
89950624c86707247f110838eea3353d21481e0decd0d8e175462fcaf11dbeca
|
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QMainWindow, QDockWidget, QTabWidget, QWidget, QVBoxLayout
from ert_gui import ERT
from ert_gui.ertwidgets import showWaitCursorWhileWaiting
from ert_gui.ertwidgets.models.ertmodel import getCurrentCaseName
from ert_gui.plottery import PlotContext, PlotDataGatherer as PDG, PlotConfig, plots, PlotConfigFactory
from ert_gui.tools.plot import DataTypeKeysWidget, CaseSelectionWidget, PlotWidget, DataTypeKeysListModel
from ert_gui.tools.plot.customize import PlotCustomizer
CROSS_CASE_STATISTICS = "Cross Case Statistics"
DISTRIBUTION = "Distribution"
GAUSSIAN_KDE = "Gaussian KDE"
ENSEMBLE = "Ensemble"
HISTOGRAM = "Histogram"
STATISTICS = "Statistics"
class PlotWindow(QMainWindow):
def __init__(self, parent):
QMainWindow.__init__(self, parent)
self._ert = ERT.ert
""":type: ert.enkf.enkf_main.EnKFMain"""
key_manager = self._ert.getKeyManager()
""":type: ert.enkf.key_manager.KeyManager """
self.setMinimumWidth(850)
self.setMinimumHeight(650)
self.setWindowTitle("Plotting")
self.activateWindow()
self._plot_customizer = PlotCustomizer(self, self._ert.plotConfig())
def plotConfigCreator(key):
return PlotConfigFactory.createPlotConfigForKey(self._ert, key)
self._plot_customizer.setPlotConfigCreator(plotConfigCreator)
self._plot_customizer.settingsChanged.connect(self.keySelected)
self._central_tab = QTabWidget()
self._central_tab.currentChanged.connect(self.currentPlotChanged)
central_widget = QWidget()
central_layout = QVBoxLayout()
central_layout.setContentsMargins(0, 0, 0, 0)
central_widget.setLayout(central_layout)
central_layout.addWidget(self._central_tab)
self.setCentralWidget(central_widget)
self._plot_widgets = []
""":type: list of PlotWidget"""
self._data_gatherers = []
""":type: list of PlotDataGatherer """
summary_gatherer = self.createDataGatherer(PDG.gatherSummaryData, key_manager.isSummaryKey, refcaseGatherFunc=PDG.gatherSummaryRefcaseData, observationGatherFunc=PDG.gatherSummaryObservationData, historyGatherFunc=PDG.gatherSummaryHistoryData)
gen_data_gatherer = self.createDataGatherer(PDG.gatherGenDataData, key_manager.isGenDataKey, observationGatherFunc=PDG.gatherGenDataObservationData)
gen_kw_gatherer = self.createDataGatherer(PDG.gatherGenKwData, key_manager.isGenKwKey)
custom_kw_gatherer = self.createDataGatherer(PDG.gatherCustomKwData, key_manager.isCustomKwKey)
self.addPlotWidget(ENSEMBLE, plots.plotEnsemble, [summary_gatherer, gen_data_gatherer])
self.addPlotWidget(STATISTICS, plots.plotStatistics, [summary_gatherer, gen_data_gatherer])
self.addPlotWidget(HISTOGRAM, plots.plotHistogram, [gen_kw_gatherer, custom_kw_gatherer])
self.addPlotWidget(GAUSSIAN_KDE, plots.plotGaussianKDE, [gen_kw_gatherer, custom_kw_gatherer])
self.addPlotWidget(DISTRIBUTION, plots.plotDistribution, [gen_kw_gatherer, custom_kw_gatherer])
self.addPlotWidget(CROSS_CASE_STATISTICS, plots.plotCrossCaseStatistics, [gen_kw_gatherer, custom_kw_gatherer])
data_types_key_model = DataTypeKeysListModel(self._ert)
self._data_type_keys_widget = DataTypeKeysWidget(data_types_key_model)
self._data_type_keys_widget.dataTypeKeySelected.connect(self.keySelected)
self.addDock("Data types", self._data_type_keys_widget)
current_case = getCurrentCaseName()
self._case_selection_widget = CaseSelectionWidget(current_case)
self._case_selection_widget.caseSelectionChanged.connect(self.keySelected)
self.addDock("Plot case", self._case_selection_widget)
current_plot_widget = self._plot_widgets[self._central_tab.currentIndex()]
current_plot_widget.setActive()
self._data_type_keys_widget.selectDefault()
self._updateCustomizer(current_plot_widget)
def createDataGatherer(self, dataGatherFunc, gatherConditionFunc, refcaseGatherFunc=None, observationGatherFunc=None, historyGatherFunc=None):
data_gatherer = PDG(dataGatherFunc, gatherConditionFunc, refcaseGatherFunc=refcaseGatherFunc, observationGatherFunc=observationGatherFunc, historyGatherFunc=historyGatherFunc)
self._data_gatherers.append(data_gatherer)
return data_gatherer
def currentPlotChanged(self):
for plot_widget in self._plot_widgets:
plot_widget.setActive(False)
index = self._central_tab.indexOf(plot_widget)
if index == self._central_tab.currentIndex() and plot_widget.canPlotKey(self.getSelectedKey()):
plot_widget.setActive()
self._updateCustomizer(plot_widget)
plot_widget.updatePlot()
def _updateCustomizer(self, plot_widget):
""" @type plot_widget: PlotWidget """
key = self.getSelectedKey()
key_manager = self._ert.getKeyManager()
index_type = PlotContext.UNKNOWN_AXIS
if key_manager.isGenDataKey(key):
index_type = PlotContext.INDEX_AXIS
elif key_manager.isSummaryKey(key):
index_type = PlotContext.DATE_AXIS
x_axis_type = PlotContext.UNKNOWN_AXIS
y_axis_type = PlotContext.UNKNOWN_AXIS
if plot_widget.name == ENSEMBLE:
x_axis_type = index_type
y_axis_type = PlotContext.VALUE_AXIS
elif plot_widget.name == STATISTICS:
x_axis_type = index_type
y_axis_type = PlotContext.VALUE_AXIS
elif plot_widget.name == DISTRIBUTION:
y_axis_type = PlotContext.VALUE_AXIS
elif plot_widget.name == CROSS_CASE_STATISTICS:
y_axis_type = PlotContext.VALUE_AXIS
elif plot_widget.name == HISTOGRAM:
x_axis_type = PlotContext.VALUE_AXIS
y_axis_type = PlotContext.COUNT_AXIS
elif plot_widget.name == GAUSSIAN_KDE:
x_axis_type = PlotContext.VALUE_AXIS
y_axis_type = PlotContext.DENSITY_AXIS
self._plot_customizer.setAxisTypes(x_axis_type, y_axis_type)
def createPlotContext(self, figure):
key = self.getSelectedKey()
cases = self._case_selection_widget.getPlotCaseNames()
data_gatherer = self.getDataGathererForKey(key)
plot_config = PlotConfig.createCopy(self._plot_customizer.getPlotConfig())
plot_config.setTitle(key)
return PlotContext(self._ert, figure, plot_config, cases, key, data_gatherer)
def getDataGathererForKey(self, key):
""" @rtype: PlotDataGatherer """
return next((data_gatherer for data_gatherer in self._data_gatherers if data_gatherer.canGatherDataForKey(key)), None)
def getSelectedKey(self):
return str(self._data_type_keys_widget.getSelectedItem())
def addPlotWidget(self, name, plotFunction, data_gatherers, enabled=True):
plot_condition_function_list = [data_gatherer.canGatherDataForKey for data_gatherer in data_gatherers]
plot_widget = PlotWidget(name, plotFunction, plot_condition_function_list, self.createPlotContext)
plot_widget.customizationTriggered.connect(self.toggleCustomizeDialog)
index = self._central_tab.addTab(plot_widget, name)
self._plot_widgets.append(plot_widget)
self._central_tab.setTabEnabled(index, enabled)
def addDock(self, name, widget, area=Qt.LeftDockWidgetArea, allowed_areas=Qt.AllDockWidgetAreas):
dock_widget = QDockWidget(name)
dock_widget.setObjectName("%sDock" % name)
dock_widget.setWidget(widget)
dock_widget.setAllowedAreas(allowed_areas)
dock_widget.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable)
self.addDockWidget(area, dock_widget)
return dock_widget
@showWaitCursorWhileWaiting
def keySelected(self):
key = self.getSelectedKey()
self._plot_customizer.switchPlotConfigHistory(key)
for plot_widget in self._plot_widgets:
plot_widget.setDirty()
index = self._central_tab.indexOf(plot_widget)
self._central_tab.setTabEnabled(index, plot_widget.canPlotKey(key))
for plot_widget in self._plot_widgets:
if plot_widget.canPlotKey(key):
plot_widget.updatePlot()
def toggleCustomizeDialog(self):
self._plot_customizer.toggleCustomizationDialog()
|
Ensembles/ert
|
python/python/ert_gui/tools/plot/plot_window.py
|
Python
|
gpl-3.0
| 8,510
|
[
"Gaussian"
] |
fc88916b106037da300c137d1e36671286c2594f3d4fe140a1f179d0eb1b6a2d
|
#!/usr/bin/env python
# add paths
import os
import sys
import traceback
import warnings
sys.path.append('../utils')
# import modules
from co2 import CO2
from fillgaps import fill
from dewpoint import dewpoint
import os, re, stat, datetime
from netCDF4 import Dataset as nc
from optparse import OptionParser
from collections import OrderedDict as od
from numpy import empty, array, zeros, concatenate, savetxt, intersect1d, inf, ones, append, resize
from .. import translator
# search for patterns in variable list
def isin(var, varlist):
vararr = array(varlist)
patt = re.compile(var + '$|' + var + '_.*')
matches = array([bool(patt.match(v)) for v in vararr])
return list(vararr[matches])
class Psims2Wth(translator.Translator):
def verify_params(self, latidx, lonidx):
return (True, "Translator %s likes the parameters" % type(self).__name__)
def run(self, latidx, lonidx):
try:
inputfile = self.config.get_dict(self.translator_type, 'inputfile', default='1.clim.nc4')
variables = self.config.get_dict(self.translator_type, 'variables', default='time,tmin,tmax,precip,solar').split(',')
tapp = self.config.get_dict(self.translator_type, 'tapp')
co2file = self.config.get_dict(self.translator_type, 'co2file')
outputfile = self.config.get_dict(self.translator_type, 'outputfile', default='GENERIC1.WTH')
filler, filler2 = -99, 10 # used for ELEV, REFHT, WNDHT, respectively
# call translator app if applicable
istmp = False
if not tapp is None:
tmpfile = inputfile + '.shift'
tapp += ' -i %s -o %s' % (inputfile, tmpfile) # add input and output file options
ret = os.system(tapp)
if ret != 0:
                    # remove the partially written temporary file before failing
                    if os.path.isfile(tmpfile): os.remove(tmpfile)
                    raise Exception('Application %s failed' % tapp)
inputfile = tmpfile
istmp = True
# open netcdf file
infile = nc(inputfile)
# get time
vlist = infile.variables.keys()
if 'time' in vlist: # make sure time is in file
time = infile.variables['time'][:]
time_units = infile.variables['time'].units
else:
raise Exception('Missing variable time')
# get reference time
ts = time_units.split('days since ')[1].split(' ')
yr0, mth0, day0 = [int(t) for t in ts[0].split('-')[0 : 3]]
if len(ts) > 1:
hr0, min0, sec0 = [int(t) for t in ts[1].split(':')[0 : 3]]
else:
hr0 = min0 = sec0 = 0
ref = datetime.datetime(yr0, mth0, day0, hr0, min0, sec0)
# get latitude, longitude
lat, lon = infile.variables['lat'][0], infile.variables['lon'][0]
# get scenarios
ns = infile.variables['scen'].size if 'scen' in infile.variables else 1
# get all data
var_lists = od([('SRAD', ['solar', 'rad', 'rsds', 'srad']), \
('TMAX', ['tmax', 'tasmax']), \
('TMIN', ['tmin', 'tasmin']), \
('RAIN', ['precip', 'pr', 'rain', 'prcp']), \
('WIND', ['wind', 'windspeed']), \
('DEWP', ['dew', 'dewp', 'dewpoint', 'tdew']), \
('HUR', ['rhum', 'hur']), \
('HUS', ['hus']), \
('VAP', ['vap', 'vapr', 'vap']), \
('TAS', ['tas']), \
('PS', ['ps'])])
unit_names = array([['mj/m^2', 'mj/m2', 'mjm-2', 'mjm-2day-1', 'mjm-2d-1', 'mj/m^2/day', 'mj/m2/day'], \
['oc', 'degc', 'degreesc', 'c'], ['oc', 'degc', 'degreesc', 'c'], ['mm', 'mm/day'], \
['kmday-1', 'km/day', 'kmdy-1', 'km/dy'], ['oc', 'degc', 'c'], \
['%'], ['kgkg-1', 'kg/kg'], ['mb'], ['oc', 'degc', 'c'], ['mb']])
var_keys = var_lists.keys()
var_names = array(var_keys)
nt, nv = len(time), len(var_names)
alldata = empty((nv, ns, nt))
found_var = zeros(nv, dtype = bool)
for i in range(nv):
var_name = var_names[i]
var_list = var_lists[var_name]
for v in var_list:
matchvar = isin(v, variables)
if matchvar == []: continue
matchvar = matchvar[0] # take first match
if not matchvar in vlist: continue
alldata[i] = infile.variables[matchvar][:].squeeze()
alldata[i] = fill(alldata[i], time, ref, var_name)
if 'units' in infile.variables[matchvar].ncattrs():
units = infile.variables[matchvar].units
else:
units = ''
units = units.lower().replace(' ', '')
# convert units, if necessary
if var_name == 'SRAD' and units in ['wm-2', 'w/m^2', 'w/m2', 'wm**-2']: # solar
alldata[i] *= 0.0864
units = unit_names[i][0]
elif var_name in ['TMAX', 'TMIN', 'TAS', 'DEWP'] and units in ['k', 'degrees(k)', 'deg(k)']: # temperature
alldata[i] -= 273.15
units = unit_names[i][0]
elif var_name == 'RAIN' and units in ['kgm-2s-1', 'kg/m^2/s', 'kg/m2/s']: # precip
alldata[i] *= 86400
units = unit_names[i][0]
elif var_name == 'WIND': # wind
if units in ['ms-1', 'm/s']:
alldata[i] *= 86.4
units = unit_names[i][0]
elif units in ['kmh-1', 'km/h', 'kmhr-1', 'km/hr']:
alldata[i] *= 24
units = unit_names[i][0]
elif units in ['milesh-1', 'miles/h', 'mileshr-1', 'miles/hr']:
alldata[i] *= 38.624256
units = unit_names[i][0]
elif var_name in ['VAP', 'PS'] and units == 'pa': # vapor pressure
alldata[i] /= 100.
units = unit_names[i][0]
elif var_name == 'HUS' and units == 'gkg-1': # specific humidity
alldata[i] /= 1000.
units = unit_names[i][0]
elif var_name == 'HUR' and units in ['', '0-1']: # relative humidity
alldata[i] *= 100.
units = unit_names[i][0]
if not units.lower() in unit_names[i]:
raise Exception('Unknown units for %s' % var_name)
found_var[i] = True
break
if not found_var[i] and var_name in ['SRAD', 'TMAX', 'TMIN', 'RAIN']:
raise Exception('Missing necessary variable %s' % var_name)
# calculate dewpoint temperature if possible
dewp_idx = var_keys.index('DEWP')
hur_idx = var_keys.index('HUR')
hus_idx = var_keys.index('HUS')
vap_idx = var_keys.index('VAP')
tas_idx = var_keys.index('TAS')
tmin_idx = var_keys.index('TMIN')
tmax_idx = var_keys.index('TMAX')
ps_idx = var_keys.index('PS')
srad_idx = var_keys.index('SRAD')
if not found_var[dewp_idx] and intersect1d(var_lists['DEWP'], variables).size:
if found_var[vap_idx]: # use vapor pressure
alldata[dewp_idx] = dewpoint(vap = alldata[vap_idx])
found_var[dewp_idx] = True
elif found_var[hur_idx]: # use relative humidity and temperature
if found_var[tas_idx]:
alldata[dewp_idx] = dewpoint(hur = alldata[hur_idx], tas = alldata[tas_idx])
else:
alldata[dewp_idx] = dewpoint(hur = alldata[hur_idx], tmax = alldata[tmax_idx], tmin = alldata[tmin_idx])
found_var[dewp_idx] = True
elif found_var[hus_idx] and found_var[ps_idx]: # use specific humidity and surface pressure
alldata[dewp_idx] = dewpoint(hus = alldata[hus_idx], ps = alldata[ps_idx])
found_var[dewp_idx] = True
else:
raise Exception('Failed to compute dewpoint temperature')
# close input file
infile.close()
warnings.filterwarnings('ignore', '.*boolean index did not match indexed array along dimension.*')
# remove missing nonmandatory variables from array
alldata = alldata[: 6]
found_var = found_var[: 6]
nv = found_var.sum()
var_names = var_names[found_var]
alldata = alldata[found_var]
# compute day, month, year for every entry
datear = array([ref + datetime.timedelta(int(t)) for t in time])
days = array([d.timetuple().tm_yday for d in datear]) # convert to numpy array
months = array([d.month for d in datear])
years = array([d.year for d in datear])
# compute tav
tmin, tmax = alldata[tmin_idx], alldata[tmax_idx]
tav = 0.5 * (tmin.sum(axis = 1) + tmax.sum(axis = 1)) / nt # function of scen
# compute amp
monmax, monmin = -inf * ones(ns), inf * ones(ns)
for i in range(1, 13):
ismonth = months == i
if ismonth.sum():
t = 0.5 * (tmin[:, ismonth].sum(axis = 1) + tmax[:, ismonth].sum(axis = 1)) / ismonth.sum()
monmax[t > monmax] = t[t > monmax]
monmin[t < monmin] = t[t < monmin]
amp = monmax - monmin
# add co2 if available
if not co2file is None:
cobj = CO2(co2file)
co2 = cobj.selYears(years[0], years[-1])
var_names = append(var_names, 'CO2')
alldata = concatenate((alldata, resize(co2, (1, ns, nt))))
nv += 1
# round data
date = (1000 * (years % 100) + days).reshape((nt, 1))
alldata = alldata.round(1)
# ensure that after rounding tmax > tmin
bad_idx = alldata[tmax_idx] <= alldata[tmin_idx]
alldata[tmax_idx, bad_idx] = alldata[tmin_idx, bad_idx] + 0.1
# ensure srad >= 1.0
alldata[srad_idx, alldata[srad_idx] < 1.0] = 1.0
# write files
filenames = [outputfile] if ns == 1 else ['WTH' + str(i).zfill(5) + '.WTH' for i in range(ns)]
for i in range(ns):
# write header
head = '*WEATHER DATA : ' + os.path.basename(inputfile) + '\n'
head += '@ INSI LAT LONG ELEV TAV AMP REFHT WNDHT\n CI'
head += '%9.3f' % lat
head += '%9.3f' % lon
head += '%6d' % filler + '%6.1f' % tav[i] + '%6.1f' % amp[i]
head += '%6d' % filler + '%6d' % filler2 + '\n'
head += '@DATE' + ''.join(['%6s' % v for v in var_names]) + '\n'
# write body
with open(filenames[i], 'w') as f:
f.write(head)
savetxt(f, concatenate((date, alldata[:, i].T), axis = 1), fmt = ['%.5d'] + ['%6.1f'] * nv, delimiter = '')
# change permissions
f = os.open(filenames[i], os.O_RDONLY)
os.fchmod(f, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
os.close(f)
# delete temporary file if necessary
if istmp:
os.remove(inputfile)
return True
except:
print "[%s]: %s" % (os.path.basename(__file__), traceback.format_exc())
return False
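# Unit-conversion notes (illustrative, not part of the original translator):
#   solar:  1 W/m^2 sustained over a day = 86400 J/m^2 = 0.0864 MJ/m^2/day
#   precip: 1 kg/m^2/s of liquid water   = 1 mm/s  -> * 86400 gives mm/day
#   wind:   1 m/s = 3.6 km/h             -> * 86.4 gives km/day
#   temp:   Kelvin to degrees Celsius subtracts 273.15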
|
RDCEP/psims
|
pysims/translators/dssat46/psims2wth.py
|
Python
|
agpl-3.0
| 12,398
|
[
"NetCDF"
] |
3dbc1a6a106ed0497851e545bb70c7d04fa4bfb25fb1cf9ce4555a66549917af
|
#!/usr/bin/env python
"""
Start a given production
Example:
$ dirac-prod-start 381
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("prodID: Production ID")
_, args = Script.parseCommandLine()
from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient
# get arguments
prodID = args[0]
prodClient = ProductionClient()
res = prodClient.setProductionStatus(prodID, "Active")
if res["OK"]:
        DIRAC.gLogger.notice("Production %s successfully started" % prodID)
else:
DIRAC.gLogger.error(res["Message"])
DIRAC.exit(-1)
DIRAC.exit(0)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/ProductionSystem/scripts/dirac_prod_start.py
|
Python
|
gpl-3.0
| 797
|
[
"DIRAC"
] |
f3ab069d5000745e12fe2f31b0f0856fa4e9a518e88979f385de74ace3e614b3
|
# flake8: noqa: S1
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import math
from rdkit import Chem
from ._base import Descriptor
from .Weight import Weight
__all__ = ("LogS",)
_smarts_logs = {
"[NH0;X3;v3]": 0.71535,
"[NH2;X3;v3]": 0.41056,
"[nH0;X3]": 0.82535,
"[OH0;X2;v2]": 0.31464,
"[OH0;X1;v2]": 0.14787,
"[OH1;X2;v2]": 0.62998,
"[CH2;!R]": -0.35634,
"[CH3;!R]": -0.33888,
"[CH0;R]": -0.21912,
"[CH2;R]": -0.23057,
"[ch0]": -0.37570,
"[ch1]": -0.22435,
"F": -0.21728,
"Cl": -0.49721,
"Br": -0.57982,
"I": -0.51547,
}
_smarts_logs_molecules = [
(Chem.MolFromSmarts(smarts), log) for smarts, log in _smarts_logs.items()
]
class LogS(Descriptor):
r"""Filter-it™ LogS descriptor.
http://silicos-it.be.s3-website-eu-west-1.amazonaws.com/software/filter-it/1.0.2/filter-it.html#installation
"""
__slots__ = ()
since = "1.1.0"
explicit_hydrogens = False
kekulize = False
@classmethod
def preset(cls, version):
yield cls()
def dependencies(self):
return {"MW": Weight(exact=False)}
def description(self):
return "Filter-it™ LogS"
def __str__(self):
return "FilterItLogS"
def parameters(self):
return ()
def calculate(self, MW):
logS = 0.89823 - 0.10369 * math.sqrt(MW)
for smarts, log in _smarts_logs_molecules:
logS += len(self.mol.GetSubstructMatches(smarts)) * log
return logS
rtype = float
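# Hedged sketch (not part of mordred): the same group-contribution sum computed
# directly with RDKit for one molecule; the SMILES is an arbitrary example and
# Descriptors.MolWt stands in for the Weight dependency used above.
#
#   from rdkit.Chem import Descriptors
#   mol = Chem.MolFromSmiles("CCO")  # ethanol
#   logS = 0.89823 - 0.10369 * math.sqrt(Descriptors.MolWt(mol))
#   for patt, contrib in _smarts_logs_molecules:
#       logS += len(mol.GetSubstructMatches(patt)) * contrib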
|
mordred-descriptor/mordred
|
mordred/LogS.py
|
Python
|
bsd-3-clause
| 1,540
|
[
"RDKit"
] |
f2502f1c9bd22338e5390752ed5dce8746fa2a985f6750819817fa2886ca5750
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for MLlib Python DataFrame-based APIs.
"""
import sys
if sys.version > '3':
xrange = range
basestring = str
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from shutil import rmtree
import tempfile
import array as pyarray
import numpy as np
from numpy import abs, all, arange, array, array_equal, inf, ones, tile, zeros
import inspect
import py4j
from pyspark import keyword_only, SparkContext
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer, UnaryTransformer
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.evaluation import BinaryClassificationEvaluator, ClusteringEvaluator, \
MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.fpm import FPGrowth, FPGrowthModel
from pyspark.ml.image import ImageSchema
from pyspark.ml.linalg import DenseMatrix, DenseVector, Matrices, MatrixUDT, \
SparseMatrix, SparseVector, Vector, VectorUDT, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import DecisionTreeRegressor, GeneralizedLinearRegression, \
LinearRegression
from pyspark.ml.stat import ChiSquareTest
from pyspark.ml.tuning import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams, JavaWrapper
from pyspark.serializers import PickleSerializer
from pyspark.sql import DataFrame, Row, SparkSession, HiveContext
from pyspark.sql.functions import rand
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.storagelevel import *
from pyspark.tests import QuietTest, ReusedPySparkTestCase as PySparkTestCase
ser = PickleSerializer()
class MLlibTestCase(unittest.TestCase):
def setUp(self):
self.sc = SparkContext('local[4]', "MLlib tests")
self.spark = SparkSession(self.sc)
def tearDown(self):
self.spark.stop()
class SparkSessionTestCase(PySparkTestCase):
@classmethod
def setUpClass(cls):
PySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
PySparkTestCase.tearDownClass()
cls.spark.stop()
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockUnaryTransformer(UnaryTransformer, DefaultParamsReadable, DefaultParamsWritable):
shift = Param(Params._dummy(), "shift", "The amount by which to shift " +
"data in a DataFrame",
typeConverter=TypeConverters.toFloat)
def __init__(self, shiftVal=1):
super(MockUnaryTransformer, self).__init__()
self._setDefault(shift=1)
self._set(shift=shiftVal)
def getShift(self):
return self.getOrDefault(self.shift)
def setShift(self, shift):
self._set(shift=shift)
def createTransformFunc(self):
shiftVal = self.getShift()
return lambda x: x + shiftVal
def outputDataType(self):
return DoubleType()
def validateInputType(self, inputType):
if inputType != DoubleType():
raise TypeError("Bad input type: {}. ".format(inputType) +
"Requires Double.")
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
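# Illustrative use of MockUnaryTransformer defined above (comments only; column
# names are arbitrary and assume a DataFrame `df` with a DoubleType "input"):
#   shifter = MockUnaryTransformer(shiftVal=2).setInputCol("input").setOutputCol("output")
#   shifted = shifter.transform(df)  # adds column "output" = "input" + 2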
class JavaWrapperMemoryTests(SparkSessionTestCase):
def test_java_object_gets_detached(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=1, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
summary = model.summary
self.assertIsInstance(model, JavaWrapper)
self.assertIsInstance(summary, JavaWrapper)
self.assertIsInstance(model, JavaParams)
self.assertNotIsInstance(summary, JavaParams)
error_no_object = 'Target Object ID does not exist for this gateway'
self.assertIn("LinearRegression_", model._java_obj.toString())
self.assertIn("LinearRegressionTrainingSummary", summary._java_obj.toString())
model.__del__()
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
model._java_obj.toString()
self.assertIn("LinearRegressionTrainingSummary", summary._java_obj.toString())
try:
summary.__del__()
except:
pass
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
model._java_obj.toString()
with self.assertRaisesRegexp(py4j.protocol.Py4JError, error_no_object):
summary._java_obj.toString()
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l),
range(len(l)), l), pyarray.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
pyarray.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
def test_identity_pipeline(self):
dataset = MockDataset()
def doTransform(pipeline):
pipeline_model = pipeline.fit(dataset)
return pipeline_model.transform(dataset)
# check that empty pipeline did not perform any transformation
self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
# check that failure to set stages param will raise KeyError for missing param
self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(SparkSessionTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
self.assertTrue(testParams.hasParam(u"maxIter"))
def test_resolveparam(self):
testParams = TestParams()
self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
if sys.version_info[0] >= 3:
# In Python 3, it is allowed to get/set attributes with non-ascii characters.
e_cls = AttributeError
else:
e_cls = UnicodeEncodeError
self.assertRaises(e_cls, lambda: testParams._resolveParam(u"아"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
"set raises an error for a non-member parameter.",
typeConverter=TypeConverters.toString)
with self.assertRaises(ValueError):
testParams.set(otherParam, "value")
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
self.assertEqual(algo.getDistanceMeasure(), "euclidean")
algo.setDistanceMeasure("cosine")
self.assertEqual(algo.getDistanceMeasure(), "cosine")
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
def test_word2vec_param(self):
model = Word2Vec().setWindowSize(6)
# Check windowSize is set properly
self.assertEqual(model.getWindowSize(), 6)
def test_copy_param_extras(self):
tp = TestParams(seed=42)
extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
tp_copy = tp.copy(extra=extra)
self.assertEqual(tp.uid, tp_copy.uid)
self.assertEqual(tp.params, tp_copy.params)
for k, v in extra.items():
self.assertTrue(tp_copy.isDefined(k))
self.assertEqual(tp_copy.getOrDefault(k), v)
copied_no_extra = {}
for k, v in tp_copy._paramMap.items():
if k not in extra:
copied_no_extra[k] = v
self.assertEqual(tp._paramMap, copied_no_extra)
self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)
def test_logistic_regression_check_thresholds(self):
self.assertIsInstance(
LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
LogisticRegression
)
self.assertRaisesRegexp(
ValueError,
"Logistic Regression getThreshold found inconsistent.*$",
LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
)
def test_preserve_set_state(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
self.assertFalse(binarizer.isSet("threshold"))
binarizer.transform(dataset)
binarizer._transfer_params_from_java()
self.assertFalse(binarizer.isSet("threshold"),
"Params not explicitly set should remain unset after transform")
def test_default_params_transferred(self):
dataset = self.spark.createDataFrame([(0.5,)], ["data"])
binarizer = Binarizer(inputCol="data")
# intentionally change the pyspark default, but don't set it
binarizer._defaultParamMap[binarizer.outputCol] = "my_default"
result = binarizer.transform(dataset).select("my_default").collect()
self.assertFalse(binarizer.isSet(binarizer.outputCol))
self.assertEqual(result[0][0], 1.0)
@staticmethod
def check_params(test_self, py_stage, check_params_exist=True):
"""
Checks common requirements for Params.params:
- set of params exist in Java and Python and are ordered by names
- param parent has the same UID as the object's UID
- default param value from Java matches value in Python
- optionally check if all params from Java also exist in Python
"""
py_stage_str = "%s %s" % (type(py_stage), py_stage)
if not hasattr(py_stage, "_to_java"):
return
java_stage = py_stage._to_java()
if java_stage is None:
return
test_self.assertEqual(py_stage.uid, java_stage.uid(), msg=py_stage_str)
if check_params_exist:
param_names = [p.name for p in py_stage.params]
java_params = list(java_stage.params())
java_param_names = [jp.name() for jp in java_params]
test_self.assertEqual(
param_names, sorted(java_param_names),
"Param list in Python does not match Java for %s:\nJava = %s\nPython = %s"
% (py_stage_str, java_param_names, param_names))
for p in py_stage.params:
test_self.assertEqual(p.parent, py_stage.uid)
java_param = java_stage.getParam(p.name)
py_has_default = py_stage.hasDefault(p)
java_has_default = java_stage.hasDefault(java_param)
test_self.assertEqual(py_has_default, java_has_default,
"Default value mismatch of param %s for Params %s"
% (p.name, str(py_stage)))
if py_has_default:
if p.name == "seed":
continue # Random seeds between Spark and PySpark are different
java_default = _java2py(test_self.sc,
java_stage.clear(java_param).getOrDefault(java_param))
py_stage._clear(p)
py_default = py_stage.getOrDefault(p)
# equality test for NaN is always False
if isinstance(java_default, float) and np.isnan(java_default):
java_default = "NaN"
py_default = "NaN" if np.isnan(py_default) else "not NaN"
test_self.assertEqual(
java_default, py_default,
"Java default %s != python default %s of param %s for Params %s"
% (str(java_default), str(py_default), p.name, str(py_stage)))
class EvaluatorTests(SparkSessionTestCase):
def test_java_params(self):
"""
This tests a bug fixed by SPARK-18274 which causes multiple copies
of a Params instance in Python to be linked to the same Java instance.
"""
evaluator = RegressionEvaluator(metricName="r2")
df = self.spark.createDataFrame([Row(label=1.0, prediction=1.1)])
evaluator.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
evaluatorCopy = evaluator.copy({evaluator.metricName: "mae"})
evaluator.evaluate(df)
evaluatorCopy.evaluate(df)
self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
self.assertEqual(evaluatorCopy._java_obj.getMetricName(), "mae")
def test_clustering_evaluator_with_cosine_distance(self):
featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
[([1.0, 1.0], 1.0), ([10.0, 10.0], 1.0), ([1.0, 0.5], 2.0),
([10.0, 4.4], 2.0), ([-1.0, 1.0], 3.0), ([-100.0, 90.0], 3.0)])
dataset = self.spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
evaluator = ClusteringEvaluator(predictionCol="prediction", distanceMeasure="cosine")
self.assertEqual(evaluator.getDistanceMeasure(), "cosine")
self.assertTrue(np.isclose(evaluator.evaluate(dataset), 0.992671213, atol=1e-5))
class FeatureTests(SparkSessionTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
        self.assertTrue(all([not b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
dataset = self.spark.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
        # Test that parameters are transferred to the Python model
ParamTests.check_params(self, idf0m)
def test_ngram(self):
dataset = self.spark.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
# with language selection
stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, [])
def test_count_vectorizer_with_binary(self):
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_with_maxDF(self):
dataset = self.spark.createDataFrame([
(0, "a b c d".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0}),),
(3, "a".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
cv = CountVectorizer(inputCol="words", outputCol="features")
model1 = cv.setMaxDF(3).fit(dataset)
self.assertEqual(model1.vocabulary, ['b', 'c', 'd'])
transformedList1 = model1.transform(dataset).select("features", "expected").collect()
for r in transformedList1:
feature, expected = r
self.assertEqual(feature, expected)
model2 = cv.setMaxDF(0.75).fit(dataset)
self.assertEqual(model2.vocabulary, ['b', 'c', 'd'])
transformedList2 = model2.transform(dataset).select("features", "expected").collect()
for r in transformedList2:
feature, expected = r
self.assertEqual(feature, expected)
def test_count_vectorizer_from_vocab(self):
model = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words",
outputCol="features", minTF=2)
self.assertEqual(model.vocabulary, ["a", "b", "c"])
self.assertEqual(model.getMinTF(), 2)
dataset = self.spark.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 3.0, 1: 2.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 2.0}),),
(2, "a b".split(' '), SparseVector(3, {}),)], ["id", "words", "expected"])
transformed_list = model.transform(dataset).select("features", "expected").collect()
for r in transformed_list:
feature, expected = r
self.assertEqual(feature, expected)
# Test an empty vocabulary
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "vocabSize.*invalid.*0"):
CountVectorizerModel.from_vocabulary([], inputCol="words")
        # Test that a model with default settings can transform
model_default = CountVectorizerModel.from_vocabulary(["a", "b", "c"], inputCol="words")
transformed_list = model_default.transform(dataset)\
.select(model_default.getOrDefault(model_default.outputCol)).collect()
self.assertEqual(len(transformed_list), 3)
def test_rformula_force_index_label(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
        # Does not index the label by default since it's a numeric type.
rf = RFormula(formula="y ~ x + s")
model = rf.fit(df)
transformedDF = model.transform(df)
self.assertEqual(transformedDF.head().label, 1.0)
# Force to index label.
rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
model2 = rf2.fit(df)
transformedDF2 = model2.transform(df)
self.assertEqual(transformedDF2.head().label, 0.0)
def test_rformula_string_indexer_order_type(self):
df = self.spark.createDataFrame([
(1.0, 1.0, "a"),
(0.0, 2.0, "b"),
(1.0, 0.0, "a")], ["y", "x", "s"])
rf = RFormula(formula="y ~ x + s", stringIndexerOrderType="alphabetDesc")
self.assertEqual(rf.getStringIndexerOrderType(), 'alphabetDesc')
transformedDF = rf.fit(df).transform(df)
observed = transformedDF.select("features").collect()
expected = [[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]]
for i in range(0, len(expected)):
self.assertTrue(all(observed[i]["features"].toArray() == expected[i]))
def test_string_indexer_handle_invalid(self):
df = self.spark.createDataFrame([
(0, "a"),
(1, "d"),
(2, None)], ["id", "label"])
si1 = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="keep",
stringOrderType="alphabetAsc")
model1 = si1.fit(df)
td1 = model1.transform(df)
actual1 = td1.select("id", "indexed").collect()
expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0), Row(id=2, indexed=2.0)]
self.assertEqual(actual1, expected1)
si2 = si1.setHandleInvalid("skip")
model2 = si2.fit(df)
td2 = model2.transform(df)
actual2 = td2.select("id", "indexed").collect()
expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0)]
self.assertEqual(actual2, expected2)
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(SparkSessionTestCase):
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvCopied = cv.copy()
self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
cvModel = cv.fit(dataset)
cvModelCopied = cvModel.copy()
for index in range(len(cvModel.avgMetrics)):
self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
< 0.0001)
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for CrossValidator will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
lrModel = cvModel.bestModel
cvModelPath = temp_path + "/cvModel"
lrModel.save(cvModelPath)
loadedLrModel = LogisticRegressionModel.load(cvModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
        # test that serial and parallel cross-validation produce the same metrics
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cv.setParallelism(1)
cvSerialModel = cv.fit(dataset)
cv.setParallelism(2)
cvParallelModel = cv.fit(dataset)
self.assertEqual(cvSerialModel.avgMetrics, cvParallelModel.avgMetrics)
def test_save_load_nested_estimator(self):
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
# test save/load of CrossValidator
cv = CrossValidator(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
originalParamMap = cv.getEstimatorParamMaps()
loadedParamMap = loadedCV.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
# test save/load of CrossValidatorModel
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(SparkSessionTestCase):
def test_fit_minimize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(0.0, min(validationMetrics))
def test_fit_maximize_metric(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
validationMetrics = tvsModel.validationMetrics
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
self.assertEqual(len(grid), len(validationMetrics),
"validationMetrics has the same size of grid parameter")
self.assertEqual(1.0, max(validationMetrics))
def test_save_load_trained_model(self):
# This tests saving and loading the trained model only.
# Save/load for TrainValidationSplit will be added later: SPARK-13786
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
lrModel = tvsModel.bestModel
tvsModelPath = temp_path + "/tvsModel"
lrModel.save(tvsModelPath)
loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
self.assertEqual(loadedLrModel.uid, lrModel.uid)
self.assertEqual(loadedLrModel.intercept, lrModel.intercept)
def test_save_load_simple_estimator(self):
        # Test save/load of TrainValidationSplit and TrainValidationSplitModel.
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_parallel_evaluation(self):
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvs.setParallelism(1)
tvsSerialModel = tvs.fit(dataset)
tvs.setParallelism(2)
tvsParallelModel = tvs.fit(dataset)
self.assertEqual(tvsSerialModel.validationMetrics, tvsParallelModel.validationMetrics)
def test_save_load_nested_estimator(self):
        # Test save/load of TrainValidationSplit with a nested estimator (OneVsRest).
temp_path = tempfile.mkdtemp()
dataset = self.spark.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
ova = OneVsRest(classifier=LogisticRegression())
lr1 = LogisticRegression().setMaxIter(100)
lr2 = LogisticRegression().setMaxIter(150)
grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
evaluator = MulticlassClassificationEvaluator()
tvs = TrainValidationSplit(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
originalParamMap = tvs.getEstimatorParamMaps()
loadedParamMap = loadedTvs.getEstimatorParamMaps()
for i, param in enumerate(loadedParamMap):
for p in param:
if p.name == "classifier":
self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
else:
self.assertEqual(param[p], originalParamMap[i][p])
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
def test_copy(self):
dataset = self.spark.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = ParamGridBuilder() \
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
.build()
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsCopied = tvs.copy()
tvsModelCopied = tvsModel.copy()
self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
"Copied TrainValidationSplit has the same uid of Estimator")
self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
self.assertEqual(len(tvsModel.validationMetrics),
len(tvsModelCopied.validationMetrics),
"Copied validationMetrics has the same size of the original")
for index in range(len(tvsModel.validationMetrics)):
self.assertEqual(tvsModel.validationMetrics[index],
tvsModelCopied.validationMetrics[index])
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def _compare_params(self, m1, m2, param):
"""
Compare 2 ML Params instances for the given param, and assert both have the same param value
and parent. The param must be a parameter of m1.
"""
        # Prevent a key-not-found error when a param is in neither the paramMap nor the defaultParamMap.
if m1.isDefined(param):
paramValue1 = m1.getOrDefault(param)
paramValue2 = m2.getOrDefault(m2.getParam(param.name))
if isinstance(paramValue1, Params):
self._compare_pipelines(paramValue1, paramValue2)
else:
                self.assertEqual(paramValue1, paramValue2)  # for params of general types
# Assert parents are equal
self.assertEqual(param.parent, m2.getParam(param.name).parent)
else:
            # If the param is not defined in m1, it should not be defined in m2 either. See SPARK-14931.
self.assertFalse(m2.isDefined(m2.getParam(param.name)))
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
- OneVsRest, OneVsRestModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self._compare_params(m1, m2, p)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
for p in m1.params:
self._compare_params(m1, m2, p)
if isinstance(m1, OneVsRestModel):
self.assertEqual(len(m1.models), len(m2.models))
for x, y in zip(m1.models, m2.models):
self._compare_pipelines(x, y)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF('input')
tf = MockUnaryTransformer(shiftVal=2)\
.setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
ovrPath = temp_path + "/ovr"
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/ovrModel"
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
        dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
class LDATest(SparkSessionTestCase):
def _compare(self, m1, m2):
"""
Temp method for comparing instances.
TODO: Replace with generic implementation once SPARK-14706 is merged.
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
if m1.isDefined(p):
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
if isinstance(m1, LDAModel):
self.assertEqual(m1.vocabSize(), m2.vocabSize())
self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
def test_persistence(self):
# Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
df = self.spark.createDataFrame([
[1, Vectors.dense([0.0, 1.0])],
[2, Vectors.sparse(2, {0: 1.0})],
], ["id", "features"])
# Fit model
lda = LDA(k=2, seed=1, optimizer="em")
distributedModel = lda.fit(df)
self.assertTrue(distributedModel.isDistributed())
localModel = distributedModel.toLocal()
self.assertFalse(localModel.isDistributed())
# Define paths
path = tempfile.mkdtemp()
lda_path = path + "/lda"
dist_model_path = path + "/distLDAModel"
local_model_path = path + "/localLDAModel"
# Test LDA
lda.save(lda_path)
lda2 = LDA.load(lda_path)
self._compare(lda, lda2)
# Test DistributedLDAModel
distributedModel.save(dist_model_path)
distributedModel2 = DistributedLDAModel.load(dist_model_path)
self._compare(distributedModel, distributedModel2)
# Test LocalLDAModel
localModel.save(local_model_path)
localModel2 = LocalLDAModel.load(local_model_path)
self._compare(localModel, localModel2)
# Clean up
try:
rmtree(path)
except OSError:
pass
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertAlmostEqual(s.r2adj, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
self.assertEqual(s.degreesOfFreedom, 1)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
        # test that evaluation (with the training dataset) produces a summary with the same values
        # one check is enough to verify a summary is returned;
        # the child class LinearRegressionTrainingSummary runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.numInstances, 2)
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, basestring))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
        # test that evaluation (with the training dataset) produces a summary with the same values
        # one check is enough to verify a summary is returned;
        # the child class GeneralizedLinearRegressionTrainingSummary runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_binary_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
        # test that evaluation (with the training dataset) produces a summary with the same values
        # one check is enough to verify a summary is returned; the Scala version runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_multiclass_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], [])),
(2.0, 2.0, Vectors.dense(2.0)),
(2.0, 2.0, Vectors.dense(1.9))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 0.75, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.583, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2)
        # test that evaluation (with the training dataset) produces a summary with the same values
        # one check is enough to verify a summary is returned; the Scala version runs the full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
class KMeansTests(SparkSessionTestCase):
def test_kmeans_cosine_distance(self):
data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),
(Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),
(Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=3, seed=1, distanceMeasure="cosine")
model = kmeans.fit(df)
result = model.transform(df).collect()
self.assertTrue(result[0].prediction == result[1].prediction)
self.assertTrue(result[2].prediction == result[3].prediction)
self.assertTrue(result[4].prediction == result[5].prediction)
class OneVsRestTests(SparkSessionTestCase):
def test_copy(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, parallelism=1)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
def test_parallelism_doesnt_change_output(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)
modelPar1 = ovrPar1.fit(df)
ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)
modelPar2 = ovrPar2.fit(df)
for i, model in enumerate(modelPar1.models):
self.assertTrue(np.allclose(model.coefficients.toArray(),
modelPar2.models[i].coefficients.toArray(), atol=1E-4))
self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))
def test_support_for_weightCol(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
(1.0, Vectors.sparse(2, [], []), 1.0),
(2.0, Vectors.dense(0.5, 0.5), 1.0)],
["label", "features", "weight"])
# classifier inherits hasWeightCol
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr, weightCol="weight")
self.assertIsNotNone(ovr.fit(df))
# classifier doesn't inherit hasWeightCol
dt = DecisionTreeClassifier()
ovr2 = OneVsRest(classifier=dt, weightCol="weight")
self.assertIsNotNone(ovr2.fit(df))
class HashingTFTest(SparkSessionTestCase):
def test_apply_binary_term_freqs(self):
df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 10
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
def test_tweedie_distribution(self):
df = self.spark.createDataFrame(
[(1.0, Vectors.dense(0.0, 0.0)),
(1.0, Vectors.dense(1.0, 2.0)),
(2.0, Vectors.dense(0.0, 0.0)),
(2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
model2 = glr.setLinkPower(-1.0).fit(df)
self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
def test_offset(self):
df = self.spark.createDataFrame(
[(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
model = glr.fit(df)
self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
atol=1E-4))
self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LinearRegressionTest(SparkSessionTestCase):
def test_linear_regression_with_huber_loss(self):
data_path = "data/mllib/sample_linear_regression_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lir = LinearRegression(loss="huber", epsilon=2.0)
model = lir.fit(df)
expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,
1.2612, -0.333, -0.5694, -0.6311, 0.6053]
expectedIntercept = 0.1607
expectedScale = 9.758
self.assertTrue(
np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))
self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))
self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))
class LogisticRegressionTest(SparkSessionTestCase):
def test_binomial_logistic_regression_with_bound(self):
df = self.spark.createDataFrame(
[(1.0, 1.0, Vectors.dense(0.0, 5.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0)),
(1.0, 3.0, Vectors.dense(2.0, 1.0)),
(0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
lor = LogisticRegression(regParam=0.01, weightCol="weight",
lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
upperBoundsOnIntercepts=Vectors.dense(0.0))
model = lor.fit(df)
self.assertTrue(
np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
def test_multinomial_logistic_regression_with_bound(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
lor = LogisticRegression(regParam=0.01,
lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
model = lor.fit(df)
expected = [[4.593, 4.5516, 9.0099, 12.2904],
[1.0, 8.1093, 7.0, 10.0],
[3.041, 5.0, 8.0, 11.0]]
for i in range(0, len(expected)):
self.assertTrue(
np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
self.assertTrue(
np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class MultilayerPerceptronClassifierTest(SparkSessionTestCase):
def test_raw_and_probability_prediction(self):
data_path = "data/mllib/sample_multiclass_classification_data.txt"
df = self.spark.read.format("libsvm").load(data_path)
mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],
blockSize=128, seed=123)
model = mlp.fit(df)
test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()
result = model.transform(test).head()
expected_prediction = 2.0
expected_probability = [0.0, 0.0, 1.0]
expected_rawPrediction = [57.3955, -124.5462, 67.9943]
        self.assertEqual(result.prediction, expected_prediction)
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))
self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
class FPGrowthTests(SparkSessionTestCase):
def setUp(self):
super(FPGrowthTests, self).setUp()
self.data = self.spark.createDataFrame(
[([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
["items"])
def test_association_rules(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_association_rules = self.spark.createDataFrame(
[([3], [1], 1.0), ([2], [1], 1.0)],
["antecedent", "consequent", "confidence"]
)
actual_association_rules = fpm.associationRules
self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
def test_freq_itemsets(self):
fp = FPGrowth()
fpm = fp.fit(self.data)
expected_freq_itemsets = self.spark.createDataFrame(
[([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
["items", "freq"]
)
actual_freq_itemsets = fpm.freqItemsets
self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
def tearDown(self):
del self.data
class ImageReaderTest(SparkSessionTestCase):
def test_read_images(self):
data_path = 'data/mllib/images/kittens'
df = ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
self.assertEqual(df.count(), 4)
first_row = df.take(1)[0][0]
array = ImageSchema.toNDArray(first_row)
self.assertEqual(len(array), first_row[1])
self.assertEqual(ImageSchema.toImage(array, origin=first_row[0]), first_row)
self.assertEqual(df.schema, ImageSchema.imageSchema)
self.assertEqual(df.schema["image"].dataType, ImageSchema.columnSchema)
expected = {'CV_8UC3': 16, 'Undefined': -1, 'CV_8U': 0, 'CV_8UC1': 0, 'CV_8UC4': 24}
self.assertEqual(ImageSchema.ocvTypes, expected)
expected = ['origin', 'height', 'width', 'nChannels', 'mode', 'data']
self.assertEqual(ImageSchema.imageFields, expected)
self.assertEqual(ImageSchema.undefinedImageType, "Undefined")
with QuietTest(self.sc):
self.assertRaisesRegexp(
TypeError,
"image argument should be pyspark.sql.types.Row; however",
lambda: ImageSchema.toNDArray("a"))
with QuietTest(self.sc):
self.assertRaisesRegexp(
ValueError,
"image argument should have attributes specified in",
lambda: ImageSchema.toNDArray(Row(a=1)))
with QuietTest(self.sc):
self.assertRaisesRegexp(
TypeError,
"array argument should be numpy.ndarray; however, it got",
lambda: ImageSchema.toImage("a"))
class ImageReaderTest2(PySparkTestCase):
@classmethod
def setUpClass(cls):
super(ImageReaderTest2, cls).setUpClass()
# Note that here we enable Hive's support.
cls.spark = None
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
cls.spark = HiveContext._createForTesting(cls.sc)
@classmethod
def tearDownClass(cls):
super(ImageReaderTest2, cls).tearDownClass()
if cls.spark is not None:
cls.spark.sparkSession.stop()
cls.spark = None
def test_read_images_multiple_times(self):
# This test case is to check if `ImageSchema.readImages` tries to
# initiate Hive client multiple times. See SPARK-22651.
data_path = 'data/mllib/images/kittens'
ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
ImageSchema.readImages(data_path, recursive=True, dropImageFailures=True)
class ALSTest(SparkSessionTestCase):
def test_storage_levels(self):
df = self.spark.createDataFrame(
[(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
["user", "item", "rating"])
als = ALS().setMaxIter(1).setRank(1)
# test default params
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
# test non-default params
als.setIntermediateStorageLevel("MEMORY_ONLY_2")
als.setFinalStorageLevel("DISK_ONLY")
als.fit(df)
self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class DefaultValuesTests(PySparkTestCase):
"""
Test :py:class:`JavaParams` classes to see if their default Param values match
those in their Scala counterparts.
"""
def test_java_params(self):
import pyspark.ml.feature
import pyspark.ml.classification
import pyspark.ml.clustering
import pyspark.ml.evaluation
import pyspark.ml.pipeline
import pyspark.ml.recommendation
import pyspark.ml.regression
modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
pyspark.ml.evaluation, pyspark.ml.pipeline, pyspark.ml.recommendation,
pyspark.ml.regression]
for module in modules:
for name, cls in inspect.getmembers(module, inspect.isclass):
if not name.endswith('Model') and not name.endswith('Params')\
and issubclass(cls, JavaParams) and not inspect.isabstract(cls):
# NOTE: disable check_params_exist until there is parity with Scala API
ParamTests.check_params(self, cls(), check_params_exist=False)
# Additional classes that need explicit construction
from pyspark.ml.feature import CountVectorizerModel
ParamTests.check_params(self, CountVectorizerModel.from_vocabulary(['a'], 'input'),
check_params_exist=False)
def _squared_distance(a, b):
if isinstance(a, Vector):
return a.squared_distance(b)
else:
return b.squared_distance(a)
class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
self.assertEqual(v, ser.loads(ser.dumps(v)))
jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
self.assertEqual(v, nv)
vs = [v] * 100
jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
self.assertEqual(vs, nvs)
def test_serialize(self):
self._test_serialize(DenseVector(range(10)))
self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
self._test_serialize(DenseVector(pyarray.array('d', range(10))))
self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
self._test_serialize(SparseVector(3, {}))
self._test_serialize(DenseMatrix(2, 3, range(6)))
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
self._test_serialize(sm1)
def test_dot(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([1, 2, 3, 4])
mat = array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
arr = pyarray.array('d', [0, 1, 2, 3])
self.assertEqual(10.0, sv.dot(dv))
self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
self.assertEqual(30.0, dv.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
self.assertEqual(30.0, lst.dot(dv))
self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
self.assertEqual(7.0, sv.dot(arr))
def test_squared_distance(self):
sv = SparseVector(4, {1: 1, 3: 2})
dv = DenseVector(array([1., 2., 3., 4.]))
lst = DenseVector([4, 3, 2, 1])
lst1 = [4, 3, 2, 1]
arr = pyarray.array('d', [0, 2, 1, 3])
narr = array([0, 2, 1, 3])
self.assertEqual(15.0, _squared_distance(sv, dv))
self.assertEqual(25.0, _squared_distance(sv, lst))
self.assertEqual(20.0, _squared_distance(dv, lst))
self.assertEqual(15.0, _squared_distance(dv, sv))
self.assertEqual(25.0, _squared_distance(lst, sv))
self.assertEqual(20.0, _squared_distance(lst, dv))
self.assertEqual(0.0, _squared_distance(sv, sv))
self.assertEqual(0.0, _squared_distance(dv, dv))
self.assertEqual(0.0, _squared_distance(lst, lst))
self.assertEqual(25.0, _squared_distance(sv, lst1))
self.assertEqual(3.0, _squared_distance(sv, arr))
self.assertEqual(3.0, _squared_distance(sv, narr))
def test_hash(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(hash(v1), hash(v2))
self.assertEqual(hash(v1), hash(v3))
self.assertEqual(hash(v2), hash(v3))
self.assertFalse(hash(v1) == hash(v4))
self.assertFalse(hash(v2) == hash(v4))
def test_eq(self):
v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
self.assertEqual(v1, v2)
self.assertEqual(v1, v3)
self.assertFalse(v2 == v4)
self.assertFalse(v1 == v5)
self.assertFalse(v1 == v6)
def test_equals(self):
indices = [1, 2, 4]
values = [1., 3., 2.]
self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
def test_conversion(self):
# numpy arrays should be automatically upcast to float64
# tests for fix of [SPARK-5089]
v = array([1, 2, 3, 4], dtype='float64')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
v = array([1, 2, 3, 4], dtype='float32')
dv = DenseVector(v)
self.assertTrue(dv.array.dtype == 'float64')
def test_sparse_vector_indexing(self):
sv = SparseVector(5, {1: 1, 3: 2})
self.assertEqual(sv[0], 0.)
self.assertEqual(sv[3], 2.)
self.assertEqual(sv[1], 1.)
self.assertEqual(sv[2], 0.)
self.assertEqual(sv[4], 0.)
self.assertEqual(sv[-1], 0.)
self.assertEqual(sv[-2], 2.)
self.assertEqual(sv[-3], 0.)
self.assertEqual(sv[-5], 0.)
for ind in [5, -6]:
self.assertRaises(IndexError, sv.__getitem__, ind)
for ind in [7.8, '1']:
self.assertRaises(TypeError, sv.__getitem__, ind)
zeros = SparseVector(4, {})
self.assertEqual(zeros[0], 0.0)
self.assertEqual(zeros[3], 0.0)
for ind in [4, -5]:
self.assertRaises(IndexError, zeros.__getitem__, ind)
empty = SparseVector(0, {})
for ind in [-1, 0, 1]:
self.assertRaises(IndexError, empty.__getitem__, ind)
def test_sparse_vector_iteration(self):
self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0])
self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0])
def test_matrix_indexing(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
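# Added note: DenseMatrix stores its values in column-major order, so
# [0, 1, 4] is column 0 and [6, 8, 10] is column 1, giving the expected rows below.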
expected = [[0, 6], [1, 8], [4, 10]]
for i in range(3):
for j in range(2):
self.assertEqual(mat[i, j], expected[i][j])
for i, j in [(-1, 0), (4, 1), (3, 4)]:
self.assertRaises(IndexError, mat.__getitem__, (i, j))
def test_repr_dense_matrix(self):
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
self.assertTrue(
repr(mat),
'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
mat = DenseMatrix(6, 3, zeros(18))
self.assertTrue(
repr(mat),
'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
def test_repr_sparse_matrix(self):
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertTrue(
repr(sm1t),
'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)')
indices = tile(arange(6), 3)
values = ones(18)
sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
self.assertTrue(
repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \
[0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)")
self.assertTrue(
str(sm),
"6 X 3 CSCMatrix\n\
(0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\
(0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\
(0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..")
sm = SparseMatrix(1, 18, zeros(19), [], [])
self.assertTrue(
repr(sm),
'SparseMatrix(1, 18, \
[0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
def test_sparse_matrix(self):
# Test sparse matrix creation.
sm1 = SparseMatrix(
3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
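# Added note: this is CSC layout; colPtrs [0, 2, 2, 4, 4] means column 0 owns
# the first two (rowIndices, values) pairs, column 1 is empty, column 2 owns
# the next two pairs and column 3 is empty, matching the dense form asserted below.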
self.assertEqual(sm1.numRows, 3)
self.assertEqual(sm1.numCols, 4)
self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
self.assertTrue(
repr(sm1),
'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
# Test indexing
expected = [
[0, 0, 0, 0],
[1, 0, 4, 0],
[2, 0, 5, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1[i, j])
self.assertTrue(array_equal(sm1.toArray(), expected))
for i, j in [(-1, 1), (4, 3), (3, 5)]:
self.assertRaises(IndexError, sm1.__getitem__, (i, j))
# Test conversion to dense and sparse.
smnew = sm1.toDense().toSparse()
self.assertEqual(sm1.numRows, smnew.numRows)
self.assertEqual(sm1.numCols, smnew.numCols)
self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
self.assertTrue(array_equal(sm1.values, smnew.values))
sm1t = SparseMatrix(
3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
isTransposed=True)
self.assertEqual(sm1t.numRows, 3)
self.assertEqual(sm1t.numCols, 4)
self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
expected = [
[3, 2, 0, 0],
[0, 0, 4, 0],
[9, 0, 8, 0]]
for i in range(3):
for j in range(4):
self.assertEqual(expected[i][j], sm1t[i, j])
self.assertTrue(array_equal(sm1t.toArray(), expected))
def test_dense_matrix_is_transposed(self):
mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
self.assertEqual(mat1, mat)
expected = [[0, 4], [1, 6], [3, 9]]
for i in range(3):
for j in range(2):
self.assertEqual(mat1[i, j], expected[i][j])
self.assertTrue(array_equal(mat1.toArray(), expected))
sm = mat1.toSparse()
self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
def test_norms(self):
a = DenseVector([0, 2, 3, -1])
self.assertAlmostEqual(a.norm(2), 3.742, 3)
self.assertEqual(a.norm(1), 6)
self.assertEqual(a.norm(inf), 3)
a = SparseVector(4, [0, 2], [3, -4])
self.assertAlmostEqual(a.norm(2), 5)
self.assertEqual(a.norm(1), 7)
self.assertEqual(a.norm(inf), 4)
tmp = SparseVector(4, [0, 2], [3, 0])
self.assertEqual(tmp.numNonzeros(), 1)
class VectorUDTTests(MLlibTestCase):
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
sv0 = SparseVector(2, [], [])
sv1 = SparseVector(2, [1], [2.0])
udt = VectorUDT()
def test_json_schema(self):
self.assertEqual(VectorUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for v in [self.dv0, self.dv1, self.sv0, self.sv1]:
self.assertEqual(v, self.udt.deserialize(self.udt.serialize(v)))
def test_infer_schema(self):
rdd = self.sc.parallelize([Row(label=1.0, features=self.dv1),
Row(label=0.0, features=self.sv1)])
df = rdd.toDF()
schema = df.schema
field = [f for f in schema.fields if f.name == "features"][0]
self.assertEqual(field.dataType, self.udt)
vectors = df.rdd.map(lambda p: p.features).collect()
self.assertEqual(len(vectors), 2)
for v in vectors:
if isinstance(v, SparseVector):
self.assertEqual(v, self.sv1)
elif isinstance(v, DenseVector):
self.assertEqual(v, self.dv1)
else:
raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
udt = MatrixUDT()
def test_json_schema(self):
self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)
def test_serialization(self):
for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))
def test_infer_schema(self):
rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
df = rdd.toDF()
schema = df.schema
self.assertEqual(schema.fields[1].dataType, self.udt)
matrices = df.rdd.map(lambda x: x._2).collect()
self.assertEqual(len(matrices), 2)
for m in matrices:
if isinstance(m, DenseMatrix):
self.assertTrue(m, self.dm1)
elif isinstance(m, SparseMatrix):
self.assertTrue(m, self.sm1)
else:
raise ValueError("Expected a matrix but got type %r" % type(m))
class WrapperTests(MLlibTestCase):
def test_new_java_array(self):
# test array of strings
str_list = ["a", "b", "c"]
java_class = self.sc._gateway.jvm.java.lang.String
java_array = JavaWrapper._new_java_array(str_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), str_list)
# test array of integers
int_list = [1, 2, 3]
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array(int_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), int_list)
# test array of floats
float_list = [0.1, 0.2, 0.3]
java_class = self.sc._gateway.jvm.java.lang.Double
java_array = JavaWrapper._new_java_array(float_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), float_list)
# test array of bools
bool_list = [False, True, True]
java_class = self.sc._gateway.jvm.java.lang.Boolean
java_array = JavaWrapper._new_java_array(bool_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), bool_list)
# test array of Java DenseVectors
v1 = DenseVector([0.0, 1.0])
v2 = DenseVector([1.0, 0.0])
vec_java_list = [_py2java(self.sc, v1), _py2java(self.sc, v2)]
java_class = self.sc._gateway.jvm.org.apache.spark.ml.linalg.DenseVector
java_array = JavaWrapper._new_java_array(vec_java_list, java_class)
self.assertEqual(_java2py(self.sc, java_array), [v1, v2])
# test empty array
java_class = self.sc._gateway.jvm.java.lang.Integer
java_array = JavaWrapper._new_java_array([], java_class)
self.assertEqual(_java2py(self.sc, java_array), [])
class ChiSquareTestTests(SparkSessionTestCase):
def test_chisquaretest(self):
data = [[0, Vectors.dense([0, 1, 2])],
[1, Vectors.dense([1, 1, 1])],
[2, Vectors.dense([2, 1, 0])]]
df = self.spark.createDataFrame(data, ['label', 'feat'])
res = ChiSquareTest.test(df, 'feat', 'label')
# This line is hitting the collect bug described in #17218, commented for now.
# pValues = res.select("degreesOfFreedom").collect())
self.assertIsInstance(res, DataFrame)
fieldNames = set(field.name for field in res.schema.fields)
expectedFields = ["pValues", "degreesOfFreedom", "statistics"]
self.assertTrue(all(field in fieldNames for field in expectedFields))
class UnaryTransformerTests(SparkSessionTestCase):
def test_unary_transformer_validate_input_type(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
# should not raise any errors
transformer.validateInputType(DoubleType())
with self.assertRaises(TypeError):
# passing the wrong input type should raise an error
transformer.validateInputType(IntegerType())
def test_unary_transformer_transform(self):
shiftVal = 3
transformer = MockUnaryTransformer(shiftVal=shiftVal)\
.setInputCol("input").setOutputCol("output")
df = self.spark.range(0, 10).toDF('input')
df = df.withColumn("input", df.input.cast(dataType="double"))
transformed_df = transformer.transform(df)
results = transformed_df.select("input", "output").collect()
for res in results:
self.assertEqual(res.input + shiftVal, res.output)
class EstimatorTest(unittest.TestCase):
def testDefaultFitMultiple(self):
N = 4
data = MockDataset()
estimator = MockEstimator()
params = [{estimator.fake: i} for i in range(N)]
modelIter = estimator.fitMultiple(data, params)
indexList = []
for index, model in modelIter:
self.assertEqual(model.getFake(), index)
indexList.append(index)
self.assertEqual(sorted(indexList), list(range(N)))
if __name__ == "__main__":
from pyspark.ml.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
|
brad-kaiser/spark
|
python/pyspark/ml/tests.py
|
Python
|
apache-2.0
| 109,332
|
[
"Gaussian"
] |
1cdc333f55d4e1850c98fcf5d463cf4cf4ea3fc5bf2edd1989a9c29bbdda9029
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
from oslo.config import cfg
from nova.compute import instance_types
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova.virt import driver
LOG = logging.getLogger(__name__)
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_LOCAL_GB = 6
FAKE_VIRT_VCPUS = 1
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
def legacy_nwinfo(self):
return True
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.memory_mb_used = 0
self.local_gb_used = 0
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
}
return d
def legacy_nwinfo(self):
return True
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'instance_type_get', self._fake_instance_type_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"stats": [{"key": "num_instances", "value": "1"}],
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in instance_types.system_metadata_instance_type_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
itype = self._fake_instance_type_create()
sys_meta = self._fake_instance_system_metadata(itype)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(itype, 'new_') +
self._fake_instance_system_metadata(itype, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'memory_mb': 2,
'root_gb': 3,
'ephemeral_gb': 1,
'os_type': 'Linux',
'project_id': '123456',
'vcpus': 1,
'host': None,
'node': None,
'instance_type_id': 1,
'launched_on': None,
'system_metadata': sys_meta,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_instance_type_create(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': FAKE_VIRT_LOCAL_GB / 2,
'ephemeral_gb': FAKE_VIRT_LOCAL_GB / 2,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor'
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_type_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertEqual(None, self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_instance_type_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_instance_type_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.updated = True
values['stats'] = [{"key": "num_instances", "value": "1"}]
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted']
migrations = []
for migration in self._migrations.values():
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB,
disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus
}
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % locals())
x = tracker.compute_node[field]
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
task_state=None)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(3, 'memory_mb_used')
self._assert(2, 'local_gb_used')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(3, 'memory_mb_used')
self._assert(2, 'local_gb_used')
self._assert(1, 'current_workload')
def test_claim_and_audit(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
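# Added note: the literals 5 and 6 below are FAKE_VIRT_MEMORY_MB and
# FAKE_VIRT_LOCAL_GB, so the free values are simply total minus the claim.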
self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem, self.compute['memory_mb_used'])
self.assertEqual(5 - claim_mem, self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
def test_claim_and_abort(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(None, claim)
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(5, self.compute["free_ram_mb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(6, self.compute["free_disk_gb"])
def test_instance_claim_with_oversubscription(self):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
limits = {'memory_mb': memory_mb, 'disk_gb': root_gb * 2,
'vcpu': vcpus}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
def test_additive_claims(self):
self.limits['vcpu'] = 2
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=1)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=1)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(4, self.tracker.compute_node['local_gb_used'])
self.assertEqual(2, self.tracker.compute_node['vcpus_used'])
def test_context_claim_with_exception(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def test_instance_context_claim(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1, self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1, self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def test_cpu_stats(self):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=1)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=10)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(11, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.stubs.Set(self.conductor.db,
'migration_create', self._fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_instance_type_create()
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
migration = {
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
}
if values:
migration.update(values)
self._migrations[migration['instance_uuid']] = migration
return migration
def test_claim(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_abort(self):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_additive_claims(self):
limits = self._limits(FAKE_VIRT_MEMORY_MB * 2, FAKE_VIRT_LOCAL_GB * 2,
FAKE_VIRT_VCPUS * 2)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
def test_claim_and_audit(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_same_host(self):
self.limits['vcpu'] = 3
src_type = self._fake_instance_type_create(id=2, memory_mb=1,
root_gb=1, ephemeral_gb=0, vcpus=1)
dest_type = self._fake_instance_type_create(id=2, memory_mb=2,
root_gb=2, ephemeral_gb=1, vcpus=2)
# make an instance of src_type:
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0,
vcpus=1, instance_type_id=2)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(3, 'memory_mb_used')
self._assert(4, 'local_gb_used')
self._assert(3, 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(1, 'memory_mb_used')
self._assert(1, 'local_gb_used')
self._assert(1, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_revert(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
migration, itype = self.tracker.tracked_migrations[
self.instance['uuid']]
self.tracker.revert_resize(self.context, migration)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_revert_reserve_source(self):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
migration = self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_MIGRATING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
def test_dupe_filter(self):
self._fake_instance_type_create(id=2, memory_mb=1, root_gb=1,
ephemeral_gb=1, vcpus=1)
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_set_instance_host_and_node(self):
instance = self._fake_instance()
self.assertEqual(None, instance['host'])
self.assertEqual(None, instance['launched_on'])
self.assertEqual(None, instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
1) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
2) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': 4, 'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': 4, 'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
# 2 instances, 4 mb each
self.assertEqual(8, self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
|
Triv90/Nova
|
nova/tests/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 33,938
|
[
"exciting"
] |
b121583c2e399e96bb02a9998d5f35ca6e6f1faba4bf4ce7af0e41825e51b5db
|
import cv2
from matplotlib import pyplot as plt
orig = cv2.imread("noise3.jpg", cv2.IMREAD_GRAYSCALE)
#orig = cv2.imread("noise2.jpg", cv2.IMREAD_GRAYSCALE)
# Gaussian blur for different kernel sizes
gaussian_blur_3 = cv2.GaussianBlur(orig, (3, 3), 0)
gaussian_blur_5 = cv2.GaussianBlur(orig, (5, 5), 0)
gaussian_blur_7 = cv2.GaussianBlur(orig, (7, 7), 0)
fig = plt.figure()
images = [orig, gaussian_blur_3, gaussian_blur_5, gaussian_blur_7]
titles = ["Original", "3", "5", "7"]
for i in range(len(images)):
ax = fig.add_subplot(2, 2, i + 1)
ax.set_title(titles[i])
ax.imshow(images[i], cmap="gray")
plt.axis("off")
plt.show()
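# Added note: passing sigmaX=0 above lets OpenCV derive the Gaussian standard
# deviation from the kernel size, so the larger kernels also smooth more
# strongly. A minimal sketch with an explicit sigma instead (1.5 is a
# hypothetical value chosen purely for illustration):
gaussian_blur_7_sigma = cv2.GaussianBlur(orig, (7, 7), 1.5)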
|
anooptoffy/Masters-Course-Work-Repository
|
Semester_2/Machine Perception/Assignment1/question_6.py
|
Python
|
mit
| 648
|
[
"Gaussian"
] |
56caf4f29f9717fc6f50819321a1dceae496c9bd597b02eacfb8fb26ec4d926a
|
"""
Spglib interface for ASE
"""
import pyspglib._spglib as spg
import numpy as np
from pylada import periodic_table
def get_spacegroup(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
Return space group in international table symbol and number
as a string.
"""
lattice = np.array(bulk.cell*bulk.scale, dtype='double', order='C')
# print 'vladan get_spacegroup: lattice:\n', lattice
posMat = np.array([np.dot(atom.pos, np.linalg.inv(np.transpose(bulk.cell)) ) for atom in bulk], dtype='double', order='C')
# print 'vladan get_spacegroup: posMat:\n', posMat
syms = np.array([periodic_table.symbols.index(atom.type)+1 for atom in bulk], dtype='intc')
# print 'vladan get_spacegroup: syms:\n', syms
# Atomic positions have to be specified by scaled positions for spglib.
return spg.spacegroup(
lattice,
posMat,
syms,
symprec,
angle_tolerance)
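# Added usage sketch (not part of the original module): ``my_bulk`` is a
# hypothetical pylada structure; the function only relies on it exposing
# ``cell``, ``scale`` and atoms with ``pos``/``type`` attributes, as used above.
# sg = get_spacegroup(my_bulk, symprec=1e-5)
# print(sg) # international symbol and number returned as one string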
#def get_symmetry(bulk, use_magmoms=False, symprec=1e-5, angle_tolerance=-1.0):
def get_symmetry(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
Return symmetry operations as hash.
Hash key 'rotations' gives the numpy integer array
of the rotation matrices for scaled positions
Hash key 'translations' gives the numpy double array
of the translation vectors in scaled positions
"""
# Atomic positions have to be specified by scaled positions for spglib.
# positions = np.array(bulk.get_scaled_positions(), dtype='double', order='C')
# lattice = np.array(bulk.get_cell().T, dtype='double', order='C')
# numbers = np.array(bulk.get_atomic_numbers(), dtype='intc')
positions = np.array([np.dot(atom.pos, np.linalg.inv(np.transpose(bulk.cell)) ) for atom in bulk], dtype='double', order='C')
lattice = np.array(bulk.cell*bulk.scale, dtype='double', order='C')
numbers = np.array([periodic_table.symbols.index(atom.type)+1 for atom in bulk], dtype='intc')
# Get number of symmetry operations and allocate symmetry operations
# multi = spg.multiplicity(cell, positions, numbers, symprec)
# multi = 48 * bulk.get_number_of_atoms()
multi = 48 * len(bulk)
rotation = np.zeros((multi, 3, 3), dtype='intc')
translation = np.zeros((multi, 3), dtype='double')
# Get symmetry operations
# if use_magmoms:
# magmoms = bulk.get_magnetic_moments()
# num_sym = spg.symmetry_with_collinear_spin(rotation,
# translation,
# lattice,
# positions,
# numbers,
# magmoms,
# symprec,
# angle_tolerance)
# else:
# num_sym = spg.symmetry(rotation,
# translation,
# lattice,
# positions,
# numbers,
# symprec,
# angle_tolerance)
num_sym = spg.symmetry(rotation,
translation,
lattice,
positions,
numbers,
symprec,
angle_tolerance)
return {'rotations': np.array(rotation[:num_sym], dtype='intc', order='C'),
'translations': np.array(translation[:num_sym],
dtype='double', order='C')}
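# Added usage sketch (not part of the original module; ``my_bulk`` and
# ``x_frac`` are hypothetical): spglib expresses these operations in scaled
# coordinates, so a symmetry-equivalent position follows from R.x + t.
# ops = get_symmetry(my_bulk, symprec=1e-5)
# rot, trans = ops['rotations'][0], ops['translations'][0]
# x_equiv = np.dot(rot, x_frac) + trans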
'''
def get_symmetry(bulk, use_magmoms=False, symprec=1e-5, angle_tolerance=-1.0):
"""
Return symmetry operations as hash.
Hash key 'rotations' gives the numpy integer array
of the rotation matrices for scaled positions
Hash key 'translations' gives the numpy double array
of the translation vectors in scaled positions
"""
# Atomic positions have to be specified by scaled positions for spglib.
positions = np.array(bulk.get_scaled_positions(), dtype='double', order='C')
lattice = np.array(bulk.get_cell().T, dtype='double', order='C')
numbers = np.array(bulk.get_atomic_numbers(), dtype='intc')
# Get number of symmetry operations and allocate symmetry operations
# multi = spg.multiplicity(cell, positions, numbers, symprec)
multi = 48 * bulk.get_number_of_atoms()
rotation = np.zeros((multi, 3, 3), dtype='intc')
translation = np.zeros((multi, 3), dtype='double')
# Get symmetry operations
if use_magmoms:
magmoms = bulk.get_magnetic_moments()
num_sym = spg.symmetry_with_collinear_spin(rotation,
translation,
lattice,
positions,
numbers,
magmoms,
symprec,
angle_tolerance)
else:
num_sym = spg.symmetry(rotation,
translation,
lattice,
positions,
numbers,
symprec,
angle_tolerance)
return {'rotations': np.array(rotation[:num_sym], dtype='intc', order='C'),
'translations': np.array(translation[:num_sym],
dtype='double', order='C')}
def get_symmetry_dataset(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
number: International space group number
international: International symbol
hall: Hall symbol
transformation_matrix:
Transformation matrix from lattice of input cell to Bravais lattice
L^bravais = L^original * Tmat
origin shift: Origin shift in the setting of 'Bravais lattice'
rotations, translations:
Rotation matrices and translation vectors
Space group operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs:
Wyckoff letters
"""
positions = np.array(bulk.get_scaled_positions(), dtype='double', order='C')
lattice = np.array(bulk.get_cell().T, dtype='double', order='C')
numbers = np.array(bulk.get_atomic_numbers(), dtype='intc')
keys = ('number',
'international',
'hall',
'transformation_matrix',
'origin_shift',
'rotations',
'translations',
'wyckoffs',
'equivalent_atoms')
dataset = {}
for key, data in zip(keys, spg.dataset(lattice,
positions,
numbers,
symprec,
angle_tolerance)):
dataset[key] = data
dataset['international'] = dataset['international'].strip()
dataset['hall'] = dataset['hall'].strip()
dataset['transformation_matrix'] = np.array(
dataset['transformation_matrix'], dtype='double', order='C')
dataset['origin_shift'] = np.array(dataset['origin_shift'], dtype='double')
dataset['rotations'] = np.array(dataset['rotations'],
dtype='intc', order='C')
dataset['translations'] = np.array(dataset['translations'],
dtype='double', order='C')
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
dataset['wyckoffs'] = [letters[x] for x in dataset['wyckoffs']]
dataset['equivalent_atoms'] = np.array(dataset['equivalent_atoms'],
dtype='intc')
return dataset
def get_spacegroup(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
Return space group in international table symbol and number
as a string.
"""
# Atomic positions have to be specified by scaled positions for spglib.
return spg.spacegroup(
np.array(bulk.get_cell().T, dtype='double', order='C'),
np.array(bulk.get_scaled_positions(), dtype='double', order='C'),
np.array(bulk.get_atomic_numbers(), dtype='intc'),
symprec,
angle_tolerance)
def get_pointgroup(rotations):
"""
Return point group in international table symbol and number.
The symbols are mapped to the numbers as follows:
1 "1 "
2 "-1 "
3 "2 "
4 "m "
5 "2/m "
6 "222 "
7 "mm2 "
8 "mmm "
9 "4 "
10 "-4 "
11 "4/m "
12 "422 "
13 "4mm "
14 "-42m "
15 "4/mmm"
16 "3 "
17 "-3 "
18 "32 "
19 "3m "
20 "-3m "
21 "6 "
22 "-6 "
23 "6/m "
24 "622 "
25 "6mm "
26 "-62m "
27 "6/mmm"
28 "23 "
29 "m-3 "
30 "432 "
31 "-43m "
32 "m-3m "
"""
# (symbol, pointgroup_number, transformation_matrix)
return spg.pointgroup(np.array(rotations, dtype='intc', order='C'))
def refine_cell(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
Return refined cell
"""
# Atomic positions have to be specified by scaled positions for spglib.
num_atom = bulk.get_number_of_atoms()
lattice = np.array(bulk.get_cell().T, dtype='double', order='C')
pos = np.zeros((num_atom * 4, 3), dtype='double')
pos[:num_atom] = bulk.get_scaled_positions()
numbers = np.zeros(num_atom * 4, dtype='intc')
numbers[:num_atom] = np.array(bulk.get_atomic_numbers(), dtype='intc')
num_atom_bravais = spg.refine_cell(lattice,
pos,
numbers,
num_atom,
symprec,
angle_tolerance)
return (np.array(lattice.T, dtype='double', order='C'),
np.array(pos[:num_atom_bravais], dtype='double', order='C'),
np.array(numbers[:num_atom_bravais], dtype='intc'))
def find_primitive(bulk, symprec=1e-5, angle_tolerance=-1.0):
"""
A primitive cell in the input cell is searched and returned
as an object of Atoms class.
If no primitive cell is found, (None, None, None) is returned.
"""
# Atomic positions have to be specified by scaled positions for spglib.
positions = np.array(bulk.get_scaled_positions(), dtype='double', order='C')
lattice = np.array(bulk.get_cell().T, dtype='double', order='C')
numbers = np.array(bulk.get_atomic_numbers(), dtype='intc')
# lattice is transposed with respect to the definition of Atoms class
num_atom_prim = spg.primitive(lattice,
positions,
numbers,
symprec,
angle_tolerance)
if num_atom_prim > 0:
return (np.array(lattice.T, dtype='double', order='C'),
np.array(positions[:num_atom_prim], dtype='double', order='C'),
np.array(numbers[:num_atom_prim], dtype='intc'))
else:
return None, None, None
############
# k-points #
############
def get_grid_point_from_address(grid_address, mesh):
"""
Return grid point index by translating grid address
"""
return spg.grid_point_from_address(np.array(grid_address, dtype='intc'),
np.array(mesh, dtype='intc'))
def get_ir_reciprocal_mesh(mesh,
bulk,
is_shift=np.zeros(3, dtype='intc'),
is_time_reversal=True,
symprec=1e-5):
"""
Return k-points mesh and k-point map to the irreducible k-points
The symmetry is searched from the input cell.
is_shift=[0, 0, 0] gives Gamma center mesh.
"""
mapping = np.zeros(np.prod(mesh), dtype='intc')
mesh_points = np.zeros((np.prod(mesh), 3), dtype='intc')
spg.ir_reciprocal_mesh(
mesh_points,
mapping,
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
is_time_reversal * 1,
np.array(bulk.get_cell().T, dtype='double', order='C'),
np.array(bulk.get_scaled_positions(), dtype='double', order='C'),
np.array(bulk.get_atomic_numbers(), dtype='intc'),
symprec)
return mapping, mesh_points
def get_grid_points_by_rotations(address_orig,
reciprocal_rotations,
mesh,
is_shift=np.zeros(3, dtype='intc')):
"""
Rotation operations in reciprocal space ``reciprocal_rotations`` are applied
to a grid point ``grid_point`` and resulting grid points are returned.
"""
rot_grid_points = np.zeros(len(reciprocal_rotations), dtype='intc')
spg.grid_points_by_rotations(
rot_grid_points,
np.array(address_orig, dtype='intc'),
np.array(reciprocal_rotations, dtype='intc', order='C'),
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'))
return rot_grid_points
def get_BZ_grid_points_by_rotations(address_orig,
reciprocal_rotations,
mesh,
bz_map,
is_shift=np.zeros(3, dtype='intc')):
"""
Rotation operations in reciprocal space ``reciprocal_rotations`` are applied
to the grid point ``address_orig`` and the resulting grid points are returned.
"""
rot_grid_points = np.zeros(len(reciprocal_rotations), dtype='intc')
spg.BZ_grid_points_by_rotations(
rot_grid_points,
np.array(address_orig, dtype='intc'),
np.array(reciprocal_rotations, dtype='intc', order='C'),
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
bz_map)
return rot_grid_points
def relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice, # column vectors
is_shift=np.zeros(3, dtype='intc')):
"""
Grid addresses are relocated inside Brillouin zone.
Number of ir-grid-points inside Brillouin zone is returned.
It is assumed that the following arrays have the shapes of
bz_grid_address[prod(mesh + 1)][3]
bz_map[prod(mesh * 2)]
where grid_address[prod(mesh)][3].
Each element of grid_address is mapped to the corresponding element of
bz_grid_address, keeping the element order. bz_grid_address has
extra memory space to represent the BZ surface, because some points
on one surface are translationally equivalent to points
on the opposite surface. Those equivalent points are appended
to bz_grid_address as additional grid points, stored after the
entries copied from grid_address, i.e.
|-----------------array size of bz_grid_address---------------------|
|--grid addresses similar to grid_address--|--newly added ones--|xxx|
where xxx means the memory space that may not be used. Number of grid
points stored in bz_grid_address is returned.
bz_map is used to recover grid point index expanded to include BZ
surface from grid address. The grid point indices are mapped to
(mesh[0] * 2) x (mesh[1] * 2) x (mesh[2] * 2) space (bz_map).
"""
bz_grid_address = np.zeros(
((mesh[0] + 1) * (mesh[1] + 1) * (mesh[2] + 1), 3), dtype='intc')
bz_map = np.zeros(
(2 * mesh[0]) * (2 * mesh[1]) * (2 * mesh[2]), dtype='intc')
num_bz_ir = spg.BZ_grid_address(
bz_grid_address,
bz_map,
grid_address,
np.array(mesh, dtype='intc'),
np.array(reciprocal_lattice, dtype='double', order='C'),
np.array(is_shift, dtype='intc'))
return bz_grid_address[:num_bz_ir], bz_map
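# Illustrative usage sketch (not part of the original module): the grid returned
# by get_ir_reciprocal_mesh can be relocated into the first Brillouin zone using
# the reciprocal lattice (column vectors) of an ASE-like Atoms object `bulk`.
#
#     mapping, grid = get_ir_reciprocal_mesh(mesh, bulk)
#     reclat = np.linalg.inv(bulk.get_cell())   # reciprocal lattice, column vectors
#     bz_grid_address, bz_map = relocate_BZ_grid_address(grid, mesh, reclat)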
def get_stabilized_reciprocal_mesh(mesh,
rotations,
is_shift=np.zeros(3, dtype='intc'),
is_time_reversal=True,
qpoints=np.array([], dtype='double')):
"""
Return the k-point map to the irreducible k-points and the k-point grid points.
The symmetry is searched from the input rotation matrices in real space.
is_shift=[0, 0, 0] gives a Gamma-centered mesh; a value of 1 shifts the mesh
by half a mesh distance along the corresponding axis.
"""
mapping = np.zeros(np.prod(mesh), dtype='intc')
mesh_points = np.zeros((np.prod(mesh), 3), dtype='intc')
qpoints = np.array(qpoints, dtype='double', order='C')
if qpoints.shape == (3,):
qpoints = np.array([qpoints], dtype='double', order='C')
if qpoints.shape == (0,):
qpoints = np.array([[0, 0, 0]], dtype='double', order='C')
spg.stabilized_reciprocal_mesh(
mesh_points,
mapping,
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
qpoints)
return mapping, mesh_points
def get_triplets_reciprocal_mesh_at_q(fixed_grid_number,
mesh,
rotations,
is_time_reversal=True):
map_triplets = np.zeros(np.prod(mesh), dtype='intc')
map_q = np.zeros(np.prod(mesh), dtype='intc')
mesh_points = np.zeros((np.prod(mesh), 3), dtype='intc')
spg.triplets_reciprocal_mesh_at_q(
map_triplets,
map_q,
mesh_points,
fixed_grid_number,
np.array(mesh, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'))
return map_triplets, map_q, mesh_points
def get_BZ_triplets_at_q(grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh):
"""grid_address is overwritten."""
weights = np.zeros_like(map_triplets)
for g in map_triplets:
weights[g] += 1
ir_weights = np.extract(weights > 0, weights)
triplets = np.zeros((len(ir_weights), 3), dtype='intc')
num_ir_ret = spg.BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
np.array(mesh, dtype='intc'))
return triplets, ir_weights
def get_neighboring_grid_points(grid_point,
relative_grid_address,
mesh,
bz_grid_address,
bz_map):
relative_grid_points = np.zeros(len(relative_grid_address), dtype='intc')
spg.neighboring_grid_points(relative_grid_points,
grid_point,
relative_grid_address,
mesh,
bz_grid_address,
bz_map)
return relative_grid_points
######################
# Tetrahedron method #
######################
def get_triplets_tetrahedra_vertices(relative_grid_address,
mesh,
triplets,
bz_grid_address,
bz_map):
num_triplets = len(triplets)
vertices = np.zeros((num_triplets, 2, 24, 4), dtype='intc')
for i, tp in enumerate(triplets):
vertices_at_tp = np.zeros((2, 24, 4), dtype='intc')
spg.triplet_tetrahedra_vertices(
vertices_at_tp,
relative_grid_address,
np.array(mesh, dtype='intc'),
tp,
bz_grid_address,
bz_map)
vertices[i] = vertices_at_tp
return vertices
def get_tetrahedra_relative_grid_address(microzone_lattice):
"""
microzone_lattice:
column vectors of the parallelepiped microzone lattice
which can be obtained by:
microzone_lattice = np.linalg.inv(bulk.get_cell()) / mesh
"""
relative_grid_address = np.zeros((24, 4, 3), dtype='intc')
spg.tetrahedra_relative_grid_address(
relative_grid_address,
np.array(microzone_lattice, dtype='double', order='C'))
return relative_grid_address
def get_all_tetrahedra_relative_grid_address():
relative_grid_address = np.zeros((4, 24, 4, 3), dtype='intc')
spg.all_tetrahedra_relative_grid_address(relative_grid_address)
return relative_grid_address
def get_tetrahedra_integration_weight(omegas,
tetrahedra_omegas,
function='I'):
if isinstance(omegas, float):
return spg.tetrahedra_integration_weight(
omegas,
np.array(tetrahedra_omegas, dtype='double', order='C'),
function)
else:
integration_weights = np.zeros(len(omegas), dtype='double')
spg.tetrahedra_integration_weight_at_omegas(
integration_weights,
np.array(omegas, dtype='double'),
np.array(tetrahedra_omegas, dtype='double', order='C'),
function)
return integration_weights
'''
|
pylada/pylada-defects
|
pyspglib/spglib.py
|
Python
|
gpl-3.0
| 21,936
|
[
"ASE"
] |
e1029687c9a59e87a9b355fb7f61cfd6c4db888d153b96cd1574f5ec0a4f5fc5
|
from paraview.simple import *
import sys, time
# Path to Leap Motion Python Library
leapPath = "LeapDeveloperKit/LeapSDK/lib"
sys.path.append(leapPath)
print leapPath + " has been appended to library. Enjoy Leap Motion"
import Leap
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
# ParaView object/data to interact with
c = Cone(Radius=1)
# Show the object/data to demonstrate
Show(c)
camera = GetActiveCamera()
# Don't show the slice for demonstrative purposes
sl = Slice(c)
Render()
#Show(sl)
class SampleListener(Leap.Listener):
def on_init(self, controller):
print "Initialized"
def on_connect(self, controller):
print "Connected"
# Enable gestures
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
# Get the most recent frame and report some basic information
frame = controller.frame()
x,y,z = (0,1,2)
normal = frame.hands[0].palm_normal
position = frame.hands[0].stabilized_palm_position
print "Normal:", normal[0] , normal[1], normal[2]
print frame.hands[0].palm_normal.roll * Leap.RAD_TO_DEG
# ParaView manipulation: the camera is turned based on the user's first hand
if not frame.hands.is_empty:
# Use the roll of hand for 2D rotation (rotate about vector point out from screen, like a doorknob)
rollAngle = frame.hands[0].palm_normal.roll * Leap.RAD_TO_DEG
camera.SetRoll( rollAngle * 2 )
# Originally meant to rotate about another axis, but difficult for user interaction
#azimAngle = frame.hands[0].palm_normal.yaw * Leap.RAD_TO_DEG
#camera.Yaw( azimAngle )
# No hand was detected so set camera back to original state
# Line could be deleted to retain rotation after hand is removed
else:
camera.SetRoll(0)
# Render the change now
Render()
print "hands: %d, fingers: %d, tools: %d, gestures: %d" % (
len(frame.hands), len(frame.fingers), len(frame.tools), len(frame.gestures()))
if not frame.hands.is_empty:
# Get the first hand
hand = frame.hands[0]
# Check if the hand has any fingers
# Get the hand's sphere radius and palm position
print "Hand sphere radius: %f mm, palm position: %s" % (
hand.sphere_radius, hand.palm_position)
# Get the hand's normal vector and direction
normal = hand.palm_normal
direction = hand.direction
# Calculate the hand's pitch, roll, and yaw angles
print "Hand pitch: %f degrees, roll: %f degrees, yaw: %f degrees" % (
direction.pitch * Leap.RAD_TO_DEG,
normal.roll * Leap.RAD_TO_DEG,
direction.yaw * Leap.RAD_TO_DEG)
# Gestures
for gesture in frame.gestures():
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
# Determine clock direction using the angle between the pointable and the circle normal
if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/4:
clockwiseness = "clockwise"
else:
clockwiseness = "counterclockwise"
# Calculate the angle swept since the last frame
swept_angle = 0
if circle.state != Leap.Gesture.STATE_START:
previous_update = CircleGesture(controller.frame(1).gesture(circle.id))
swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI
print "Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % (
gesture.id, self.state_string(gesture.state),
circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness)
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
print "Swipe id: %d, state: %s, position: %s, direction: %s, speed: %f" % (
gesture.id, self.state_string(gesture.state),
swipe.position, swipe.direction, swipe.speed)
if gesture.type == Leap.Gesture.TYPE_KEY_TAP:
keytap = KeyTapGesture(gesture)
print "Key Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
keytap.position, keytap.direction )
if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
screentap = ScreenTapGesture(gesture)
print "Screen Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
screentap.position, screentap.direction )
if not (frame.hands.is_empty and frame.gestures().is_empty):
print ""
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create a sample listener and controller
listener = SampleListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
while True:
time.sleep(1.0)
# Remove the sample listener when done
controller.remove_listener(listener)
if __name__ == "__main__":
main()
main()
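# Illustrative note (not part of the original script): the on_frame handler maps
# the roll of the first detected hand directly onto the ParaView camera roll with
# a gain of 2, so for example a palm rolled 30 degrees yields camera.SetRoll(60).
# Because the module imports paraview.simple, it is normally launched with
# ParaView's Python interpreter (typically `pvpython`), e.g. something like
#
#     pvpython Code/test_rotate.py
#
# with the Leap Motion SDK available at the `leapPath` location assumed above.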
|
VictorLoren/leapmotion-paraview
|
Code/test_rotate.py
|
Python
|
mit
| 6,329
|
[
"ParaView"
] |
167366941092f0f7ac5c34a5c25c8e52e1ff46966bbaa55fdf94e0ca5d0777e9
|
"""
Logout Page
"""
import os
from bok_choy.page_object import PageObject
from selenium.common.exceptions import WebDriverException
from regression.pages.whitelabel import ECOM_URL_WITH_AUTH
from regression.pages.whitelabel.home_page import HomePage
class EcommerceLogoutPage(PageObject):
"""
E-Commerce Logout
Use visit() to actually perform the logout.
"""
url = os.path.join(ECOM_URL_WITH_AUTH, 'logout/')
def is_browser_on_page(self):
"""
Is browser on the page?
Returns:
True if the sign out message is on the page.
"""
home_page = HomePage(self.browser)
try:
return ("you have signed out" in self.browser.page_source.lower()) or \
home_page.is_browser_on_page()
except WebDriverException:
# page is not yet available
return False
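# Illustrative usage sketch (not part of the original module): with a selenium
# WebDriver instance `browser` that is already authenticated, logging out of the
# E-Commerce service is simply visiting this page object:
#
#     logout_page = EcommerceLogoutPage(browser)
#     logout_page.visit()   # bok_choy's visit() loads `url` and waits until
#                           # is_browser_on_page() returns True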
|
edx/edx-e2e-tests
|
regression/pages/whitelabel/logout_page.py
|
Python
|
agpl-3.0
| 888
|
[
"VisIt"
] |
4c2bf4d0e660d77d56057f91756bac3813c689bf9d250142629bba2d06eabc72
|
"""Main entry points for scripts."""
from __future__ import print_function, division
from argparse import ArgumentParser
from collections import OrderedDict
from copy import copy
from datetime import datetime
import glob
import json
import logging
import math
import os
import scipy.stats
import numpy as np
from .version import __version__
from .psffuncs import gaussian_moffat_psf
from .psf import TabularPSF, GaussianMoffatPSF
from .io import read_datacube, write_results, read_results
from .fitting import (guess_sky, fit_galaxy_single, fit_galaxy_sky_multi,
fit_position_sky, fit_position_sky_sn_multi,
RegularizationPenalty)
from .utils import yxbounds
from .extern import ADR, Hyper_PSF3D_PL
__all__ = ["cubefit", "cubefit_subtract", "cubefit_plot"]
MODEL_SHAPE = (32, 32)
SPAXEL_SIZE = 0.43
MIN_NMAD = 2.5 # Minimum Number of Median Absolute Deviations above
# the minimum spaxel value in fit_position
LBFGSB_FACTOR = 1e10
REFWAVE = 5000. # reference wavelength in Angstroms for PSF params and ADR
POSITION_BOUND = 3. # Bound on fitted positions relative to initial positions
def snfpsf(wave, psfparams, header, psftype):
"""Create a 3-d PSF based on SNFactory-specific parameterization of
Gaussian + Moffat PSF parameters and ADR."""
# Get Gaussian+Moffat parameters at each wavelength.
relwave = wave / REFWAVE - 1.0
ellipticity = abs(psfparams[0]) * np.ones_like(wave)
alpha = np.abs(psfparams[1] +
psfparams[2] * relwave +
psfparams[3] * relwave**2)
# correlated parameters (coefficients determined externally)
sigma = 0.545 + 0.215 * alpha # Gaussian parameter
beta = 1.685 + 0.345 * alpha # Moffat parameter
eta = 1.040 + 0.0 * alpha # gaussian ampl. / moffat ampl.
# Atmospheric differential refraction (ADR): Because of ADR,
# the center of the PSF will be different at each wavelength,
# by an amount that we can determine (pretty well) from the
# atmospheric conditions and the pointing and angle of the
# instrument. We calculate the offsets here as a function of
# observation and wavelength and input these to the model.
# Correction to parallactic angle and airmass for 2nd-order effects
# such as MLA rotation, mechanical flexures or finite-exposure
# corrections. These values have been trained on faint-std star
# exposures.
#
# `predict_adr_params` uses 'AIRMASS', 'PARANG' and 'CHANNEL' keys
# in input dictionary.
delta, theta = Hyper_PSF3D_PL.predict_adr_params(header)
# check for crazy values of pressure and temperature, and assign default
# values.
pressure = header.get('PRESSURE', 617.)
if not 550. < pressure < 650.:
pressure = 617.
temp = header.get('TEMP', 2.)
if not -20. < temp < 20.:
temp = 2.
adr = ADR(pressure, temp, lref=REFWAVE, delta=delta, theta=theta)
adr_refract = adr.refract(0, 0, wave, unit=SPAXEL_SIZE)
# adr_refract[0, :] corresponds to x, adr_refract[1, :] => y
xctr, yctr = adr_refract
if psftype == 'gaussian-moffat':
return GaussianMoffatPSF(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
elif psftype == 'tabular':
A = gaussian_moffat_psf(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
return TabularPSF(A)
else:
raise ValueError("unknown psf type: " + repr(psftype))
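# Illustrative worked example (not part of the original module): with the
# correlated-parameter relations above, a Moffat alpha of 2.0 implies
# sigma = 0.545 + 0.215 * 2.0 = 0.975, beta = 1.685 + 0.345 * 2.0 = 2.375 and
# eta = 1.040, i.e. the wavelength-dependent PSF shape is driven entirely by the
# fitted ellipticity and the three alpha coefficients in `psfparams`.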
def setup_logging(loglevel, logfname=None):
# if loglevel isn't an integer, parse it as "debug", "info", etc:
if not isinstance(loglevel, int):
loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel, int):
print('Invalid log level: %s' % loglevel)
exit(1)
# remove logfile if it already exists
if logfname is not None and os.path.exists(logfname):
os.remove(logfname)
logging.basicConfig(filename=logfname, format="%(levelname)s %(message)s",
level=loglevel)
def cubefit(argv=None):
DESCRIPTION = "Fit SN + galaxy model to SNFactory data cubes."
parser = ArgumentParser(prog="cubefit", description=DESCRIPTION)
parser.add_argument("configfile",
help="configuration file name (JSON format)")
parser.add_argument("outfile", help="Output file name (FITS format)")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument("--logfile", help="Write log to this file "
"(default: print to stdout)", default=None)
parser.add_argument("--loglevel", default="info",
help="one of: debug, info, warning (default is info)")
parser.add_argument("--diagdir", default=None,
help="If given, write intermediate diagnostic results "
"to this directory")
parser.add_argument("--refitgal", default=False, action="store_true",
help="Add an iteration where galaxy model is fit "
"using all epochs and then data/SN positions are "
"refit")
parser.add_argument("--mu_wave", default=0.07, type=float,
help="Wavelength regularization parameter. "
"Default is 0.07.")
parser.add_argument("--mu_xy", default=0.001, type=float,
help="Spatial regularization parameter. "
"Default is 0.001.")
parser.add_argument("--psftype", default="gaussian-moffat",
help="Type of PSF: 'gaussian-moffat' or 'tabular'. "
"Currently, tabular means generate a tabular PSF from "
"gaussian-moffat parameters.")
args = parser.parse_args(argv)
setup_logging(args.loglevel, logfname=args.logfile)
# record start time
tstart = datetime.now()
logging.info("cubefit v%s started at %s", __version__,
tstart.strftime("%Y-%m-%d %H:%M:%S"))
tsteps = OrderedDict() # finish time of each step.
logging.info("parameters: mu_wave={:.3g} mu_xy={:.3g} refitgal={}"
.format(args.mu_wave, args.mu_xy, args.refitgal))
logging.info(" psftype={}".format(args.psftype))
logging.info("reading config file")
with open(args.configfile) as f:
cfg = json.load(f)
# basic checks on config contents.
assert (len(cfg["filenames"]) == len(cfg["xcenters"]) ==
len(cfg["ycenters"]) == len(cfg["psf_params"]))
# -------------------------------------------------------------------------
# Load data cubes from the list of FITS files.
nt = len(cfg["filenames"])
logging.info("reading %d data cubes", nt)
cubes = []
for fname in cfg["filenames"]:
logging.debug(" reading %s", fname)
cubes.append(read_datacube(os.path.join(args.dataprefix, fname)))
wave = cubes[0].wave
nw = len(wave)
# assign some local variables for convenience
refs = cfg["refs"]
master_ref = cfg["master_ref"]
if master_ref not in refs:
raise ValueError("master ref choice must be one of the final refs (" +
" ".join(str(i) for i in refs) + ")")
nonmaster_refs = [i for i in refs if i != master_ref]
nonrefs = [i for i in range(nt) if i not in refs]
# Ensure that all cubes have the same wavelengths.
if not all(np.all(cubes[i].wave == wave) for i in range(1, nt)):
raise ValueError("all data must have same wavelengths")
# -------------------------------------------------------------------------
# PSF for each observation
logging.info("setting up PSF for all %d epochs", nt)
psfs = [snfpsf(wave, cfg["psf_params"][i], cubes[i].header, args.psftype)
for i in range(nt)]
# -------------------------------------------------------------------------
# Initialize all model parameters to be fit
yctr0 = np.array(cfg["ycenters"])
xctr0 = np.array(cfg["xcenters"])
galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
sn = np.zeros((nt, nw), dtype=np.float64) # SN spectrum at each epoch
skys = np.zeros((nt, nw), dtype=np.float64) # Sky spectrum at each epoch
yctr = yctr0.copy()
xctr = xctr0.copy()
snctr = (0., 0.)
# For writing out to FITS
modelwcs = {"CRVAL1": -SPAXEL_SIZE * (MODEL_SHAPE[0] - 1) / 2.,
"CRPIX1": 1,
"CDELT1": SPAXEL_SIZE,
"CRVAL2": -SPAXEL_SIZE * (MODEL_SHAPE[1] - 1) / 2.,
"CRPIX2": 1,
"CDELT2": SPAXEL_SIZE,
"CRVAL3": cubes[0].header["CRVAL3"],
"CRPIX3": cubes[0].header["CRPIX3"],
"CDELT3": cubes[0].header["CDELT3"]}
# -------------------------------------------------------------------------
# Position bounds
# Bounds on data position: shape=(nt, 2)
xctrbounds = np.vstack((xctr - POSITION_BOUND, xctr + POSITION_BOUND)).T
yctrbounds = np.vstack((yctr - POSITION_BOUND, yctr + POSITION_BOUND)).T
snctrbounds = (-POSITION_BOUND, POSITION_BOUND)
# For data positions, check that bounds do not extend
# past the edge of the model and adjust the minbound and maxbound.
# This doesn't apply to SN position.
gshape = galaxy.shape[1:3] # model shape
for i in range(nt):
dshape = cubes[i].data.shape[1:3]
(yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)
yctrbounds[i, 0] = max(yctrbounds[i, 0], yminabs)
yctrbounds[i, 1] = min(yctrbounds[i, 1], ymaxabs)
xctrbounds[i, 0] = max(xctrbounds[i, 0], xminabs)
xctrbounds[i, 1] = min(xctrbounds[i, 1], xmaxabs)
# -------------------------------------------------------------------------
# Guess sky
logging.info("guessing sky for all %d epochs", nt)
for i, cube in enumerate(cubes):
skys[i, :] = guess_sky(cube, npix=30)
# -------------------------------------------------------------------------
# Regularization penalty parameters
# Calculate rough average galaxy spectrum from all final refs.
spectra = np.zeros((len(refs), len(wave)), dtype=np.float64)
for j, i in enumerate(refs):
avg_spec = np.average(cubes[i].data, axis=(1, 2)) - skys[i]
mean_spec, bins, bn = scipy.stats.binned_statistic(wave, avg_spec,
bins=len(wave) // 10)
spectra[j] = np.interp(wave, bins[:-1] + np.diff(bins)[0]/2.,
mean_spec)
mean_gal_spec = np.average(spectra, axis=0)
# Ensure that there won't be any negative or tiny values in mean:
mean_floor = 0.1 * np.median(mean_gal_spec)
mean_gal_spec[mean_gal_spec < mean_floor] = mean_floor
galprior = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
regpenalty = RegularizationPenalty(galprior, mean_gal_spec, args.mu_xy,
args.mu_wave)
tsteps["setup"] = datetime.now()
# -------------------------------------------------------------------------
# Fit just the galaxy model to just the master ref.
data = cubes[master_ref].data - skys[master_ref, :, None, None]
weight = cubes[master_ref].weight
logging.info("fitting galaxy to master ref [%d]", master_ref)
galaxy = fit_galaxy_single(galaxy, data, weight,
(yctr[master_ref], xctr[master_ref]),
psfs[master_ref], regpenalty, LBFGSB_FACTOR)
if args.diagdir:
fname = os.path.join(args.diagdir, 'step1.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
tsteps["fit galaxy to master ref"] = datetime.now()
# -------------------------------------------------------------------------
# Fit the positions of the other final refs
#
# Here we only use spaxels where the *model* has significant flux.
# We define "significant" as some number of median absolute deviations
# (MAD) above the minimum flux in the model. We (temporarily) set the
# weight of "insignificant" spaxels to zero during this process, then
# restore the original weight after we're done.
#
# If there are less than 20 "significant" spaxels, we do not attempt to
# fit the position, but simply leave it as is.
logging.info("fitting position of non-master refs %s", nonmaster_refs)
for i in nonmaster_refs:
cube = cubes[i]
# Evaluate galaxy on this epoch for purpose of masking spaxels.
gal = psfs[i].evaluate_galaxy(galaxy, (cube.ny, cube.nx),
(yctr[i], xctr[i]))
# Set weight of low-valued spaxels to zero.
gal2d = gal.sum(axis=0) # Sum of gal over wavelengths
mad = np.median(np.abs(gal2d - np.median(gal2d)))
mask = gal2d > np.min(gal2d) + MIN_NMAD * mad
if mask.sum() < 20:
continue
weight = cube.weight * mask[None, :, :]
fctr, fsky = fit_position_sky(galaxy, cube.data, weight,
(yctr[i], xctr[i]), psfs[i],
(yctrbounds[i], xctrbounds[i]))
yctr[i], xctr[i] = fctr
skys[i, :] = fsky
tsteps["fit positions of other refs"] = datetime.now()
# -------------------------------------------------------------------------
# Redo model fit, this time including all final refs.
datas = [cubes[i].data for i in refs]
weights = [cubes[i].weight for i in refs]
ctrs = [(yctr[i], xctr[i]) for i in refs]
psfs_refs = [psfs[i] for i in refs]
logging.info("fitting galaxy to all refs %s", refs)
galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
psfs_refs, regpenalty, LBFGSB_FACTOR)
# put fitted skys back in `skys`
for i,j in enumerate(refs):
skys[j, :] = fskys[i]
if args.diagdir:
fname = os.path.join(args.diagdir, 'step2.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
tsteps["fit galaxy to all refs"] = datetime.now()
# -------------------------------------------------------------------------
# Fit position of data and SN in non-references
#
# Now we think we have a good galaxy model. We fix this and fit
# the relative position of the remaining epochs (which presumably
# all have some SN light). We simultaneously fit the position of
# the SN itself.
logging.info("fitting position of all %d non-refs and SN position",
len(nonrefs))
if len(nonrefs) > 0:
datas = [cubes[i].data for i in nonrefs]
weights = [cubes[i].weight for i in nonrefs]
psfs_nonrefs = [psfs[i] for i in nonrefs]
fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
xctrbounds[nonrefs], snctrbounds)
# put fitted results back in parameter lists.
yctr[nonrefs] = fyctr
xctr[nonrefs] = fxctr
for i,j in enumerate(nonrefs):
skys[j, :] = fskys[i]
sn[j, :] = fsne[i]
tsteps["fit positions of nonrefs & SN"] = datetime.now()
# -------------------------------------------------------------------------
# optional step(s)
if args.refitgal and len(nonrefs) > 0:
if args.diagdir:
fname = os.path.join(args.diagdir, 'step3.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
# ---------------------------------------------------------------------
# Redo fit of galaxy, using ALL epochs, including ones with SN
# light. We hold the SN "fixed" simply by subtracting it from the
# data and fitting the remainder.
#
# This is slightly dangerous: any errors in the original SN
# determination, whether due to an incorrect PSF or ADR model
# or errors in the galaxy model will result in residuals. The
# galaxy model will then try to compensate for these.
#
# We should look at the galaxy model at the position of the SN
# before and after this step to see if there is a bias towards
# the galaxy flux increasing.
logging.info("fitting galaxy using all %d epochs", nt)
datas = [cube.data for cube in cubes]
weights = [cube.weight for cube in cubes]
ctrs = [(yctr[i], xctr[i]) for i in range(nt)]
# subtract SN from non-ref cubes.
for i in nonrefs:
s = psfs[i].point_source(snctr, datas[i].shape[1:3], ctrs[i])
# do *not* use in-place operation (-=) here!
datas[i] = cubes[i].data - sn[i, :, None, None] * s
galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
psfs, regpenalty, LBFGSB_FACTOR)
for i in range(nt):
skys[i, :] = fskys[i] # put fitted skys back in skys
if args.diagdir:
fname = os.path.join(args.diagdir, 'step4.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
# ---------------------------------------------------------------------
# Repeat step before last: fit position of data and SN in
# non-references
logging.info("re-fitting position of all %d non-refs and SN position",
len(nonrefs))
if len(nonrefs) > 0:
datas = [cubes[i].data for i in nonrefs]
weights = [cubes[i].weight for i in nonrefs]
psfs_nonrefs = [psfs[i] for i in nonrefs]
fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
xctrbounds[nonrefs], snctrbounds)
# put fitted results back in parameter lists.
yctr[nonrefs] = fyctr
xctr[nonrefs] = fxctr
for i, j in enumerate(nonrefs):
skys[j, :] = fskys[i]
sn[j, :] = fsne[i]
# -------------------------------------------------------------------------
# Write results
logging.info("writing results to %s", args.outfile)
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, args.outfile)
# time info
logging.info("step times:")
maxlen = max(len(key) for key in tsteps)
fmtstr = " %2dm%02ds - %-" + str(maxlen) + "s"
tprev = tstart
for key, tstep in tsteps.items():
t = (tstep - tprev).seconds
logging.info(fmtstr, t//60, t%60, key)
tprev = tstep
tfinish = datetime.now()
logging.info("finished at %s", tfinish.strftime("%Y-%m-%d %H:%M:%S"))
t = (tfinish - tstart).seconds
logging.info("took %3dm%2ds", t // 60, t % 60)
return 0
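# Illustrative configuration sketch (not part of the original module): the keys
# read above from the JSON config file are, at minimum, something like
#
#     {
#       "filenames":  ["epoch0.fits", "epoch1.fits"],
#       "xcenters":   [0.0, 0.1],
#       "ycenters":   [0.0, -0.2],
#       "psf_params": [[e, a0, a1, a2], [e, a0, a1, a2]],
#       "refs":       [0],
#       "master_ref": 0
#     }
#
# File names and numeric values are placeholders; the four psf_params entries per
# epoch are the ellipticity and the three alpha coefficients used by snfpsf().
# cubefit-subtract additionally expects "outnames" and "sn_outnames" lists.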
def cubefit_subtract(argv=None):
DESCRIPTION = \
"""Subtract model determined by cubefit from the original data.
The "outnames" key in the supplied configuration file is used to
determine the output FITS file names. The input FITS header is passed
unaltered to the output file, with the following additions:
(1) A `HISTORY` entry. (2) `CBFT_SNX` and `CBFT_SNY` records giving
the cubefit-determined position of the SN relative to the center of
the data array (at the reference wavelength).
This script also writes fitted SN spectra to individual FITS files.
The "sn_outnames" configuration field determines the output filenames.
"""
import shutil
import fitsio
prog_name = "cubefit-subtract"
prog_name_ver = "{} v{}".format(prog_name, __version__)
parser = ArgumentParser(prog=prog_name, description=DESCRIPTION)
parser.add_argument("configfile", help="configuration file name "
"(JSON format), same as cubefit input.")
parser.add_argument("resultfile", help="Result FITS file from cubefit")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument("--outprefix", default="",
help="path prepended to output file names; default is "
"empty string")
args = parser.parse_args(argv)
setup_logging("info")
# get input & output filenames
with open(args.configfile) as f:
cfg = json.load(f)
fnames = [os.path.join(args.dataprefix, fname)
for fname in cfg["filenames"]]
outfnames = [os.path.join(args.outprefix, fname)
for fname in cfg["outnames"]]
# load results
results = read_results(args.resultfile)
epochs = results["epochs"]
sny, snx = results["snctr"]
if not len(epochs) == len(fnames) == len(outfnames):
raise RuntimeError("number of epochs in result file not equal to "
"number of input and output files in config file")
# subtract and write out.
for fname, outfname, epoch in zip(fnames, outfnames, epochs):
logging.info("writing %s", outfname)
shutil.copy(fname, outfname)
f = fitsio.FITS(outfname, "rw")
data = f[0].read()
data -= epoch["galeval"]
f[0].write(data)
f[0].write_history("galaxy subtracted by " + prog_name_ver)
f[0].write_key("CBFT_SNX", snx - epoch['xctr'],
comment="SN x offset from center at {:.0f} A [spaxels]"
.format(REFWAVE))
f[0].write_key("CBFT_SNY", sny - epoch['yctr'],
comment="SN y offset from center at {:.0f} A [spaxels]"
.format(REFWAVE))
f.close()
# output SN spectra to separate files.
sn_outnames = [os.path.join(args.outprefix, fname)
for fname in cfg["sn_outnames"]]
header = {"CRVAL1": results["header"]["CRVAL3"],
"CRPIX1": results["header"]["CRPIX3"],
"CDELT1": results["header"]["CDELT3"]}
for outfname, epoch in zip(sn_outnames, epochs):
logging.info("writing %s", outfname)
if os.path.exists(outfname): # avoid warning from clobber=True
os.remove(outfname)
with fitsio.FITS(outfname, "rw") as f:
f.write(epoch["sn"], extname="sn", header=header)
f[0].write_history("created by " + prog_name_ver)
return 0
def cubefit_plot(argv=None):
DESCRIPTION = """Plot results and diagnostics from cubefit"""
from .plotting import plot_timeseries, plot_epoch, plot_sn, plot_adr
# arguments are the same as cubefit except an output
parser = ArgumentParser(prog="cubefit-plot", description=DESCRIPTION)
parser.add_argument("configfile", help="configuration filename")
parser.add_argument("resultfile", help="Result filename from cubefit")
parser.add_argument("outprefix", help="output prefix")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument('-b', '--band', help='timeseries band (U, B, V). '
'Default is a 1000 A wide band in middle of cube.',
default=None, dest='band')
parser.add_argument('--idrfiles', nargs='+', default=None,
help='Prefix of IDR. If given, the cubefit SN '
'spectra are plotted against the production values.')
parser.add_argument("--diagdir", default=None,
help="If given, read intermediate diagnostic "
"results from this directory and include in plot(s)")
parser.add_argument("--plotepochs", default=False, action="store_true",
help="Make diagnostic plots for each epoch")
args = parser.parse_args(argv)
# Read in data
with open(args.configfile) as f:
cfg = json.load(f)
cubes = [read_datacube(os.path.join(args.dataprefix, fname), scale=False)
for fname in cfg["filenames"]]
results = OrderedDict()
# Diagnostic results at each step
if args.diagdir is not None:
fnames = sorted(glob.glob(os.path.join(args.diagdir, "step*.fits")))
for fname in fnames:
name = os.path.basename(fname).split(".")[0]
results[name] = read_results(fname)
# Final result (don't fail if not available)
if os.path.exists(args.resultfile):
results["final"] = read_results(args.resultfile)
# plot time series
plot_timeseries(cubes, results, band=args.band,
fname=(args.outprefix + '_timeseries.png'))
# Plot wave slices and sn, galaxy and sky spectra for all epochs.
if 'final' in results and args.plotepochs:
for i_t in range(len(cubes)):
plot_epoch(cubes[i_t], results['final']['epochs'][i_t],
fname=(args.outprefix + '_epoch%02d.png' % i_t))
# Plot result spectra against IDR spectra.
if 'final' in results and args.idrfiles is not None:
plot_sn(cfg['filenames'], results['final']['epochs']['sn'],
results['final']['wave'], args.idrfiles,
args.outprefix + '_sn.png')
# Plot the x-y coordinates of the adr versus wavelength
# (Skip this for now; contains no interesting information)
#plot_adr(cubes, cubes[0].wave, fname=(args.outprefix + '_adr.png'))
return 0
|
snfactory/cubefit
|
cubefit/main.py
|
Python
|
mit
| 26,267
|
[
"Galaxy",
"Gaussian"
] |
fdc3cf376c7295f49148a9f7760f0abee6e02f6750406ebb03136010f0d903d7
|
# Copyright 2012 Brian Waldon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Self-validating model for arbitrary objects"""
import copy
import warnings
import jsonpatch
import jsonschema
import six
from . import exceptions
class Model(dict):
def __init__(self, *args, **kwargs):
# we overload setattr so set this manually
d = dict(*args, **kwargs)
try:
self.validate(d)
except exceptions.ValidationError as exc:
raise ValueError(str(exc))
else:
dict.__init__(self, d)
self.__dict__['changes'] = {}
self.__dict__['__original__'] = copy.deepcopy(d)
def __setitem__(self, key, value):
mutation = dict(self.items())
mutation[key] = value
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
msg = ("Unable to set '%s' to '%s'. Reason: %s"
% (key, value, str(exc)))
raise exceptions.InvalidOperation(msg)
dict.__setitem__(self, key, value)
self.__dict__['changes'][key] = value
def __delitem__(self, key):
mutation = dict(self.items())
del mutation[key]
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
msg = ("Unable to delete attribute '%s'. Reason: %s"
% (key, str(exc)))
raise exceptions.InvalidOperation(msg)
dict.__delitem__(self, key)
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __delattr__(self, key):
self.__delitem__(key)
### BEGIN dict compatibility methods ###
def clear(self):
raise exceptions.InvalidOperation()
def pop(self, key, default=None):
raise exceptions.InvalidOperation()
def popitem(self):
raise exceptions.InvalidOperation()
def copy(self):
return copy.deepcopy(dict(self))
def update(self, other):
mutation = dict(self.items())
mutation.update(other)
try:
self.validate(mutation)
except exceptions.ValidationError as exc:
raise exceptions.InvalidOperation(str(exc))
dict.update(self, other)
def iteritems(self):
return six.iteritems(copy.deepcopy(dict(self)))
def items(self):
return copy.deepcopy(dict(self)).items()
def itervalues(self):
return six.itervalues(copy.deepcopy(dict(self)))
def values(self):
return copy.deepcopy(dict(self)).values()
### END dict compatibility methods ###
@property
def patch(self):
"""Return a jsonpatch object representing the delta"""
original = self.__dict__['__original__']
return jsonpatch.make_patch(original, dict(self)).to_string()
@property
def changes(self):
"""Dumber version of 'patch' method"""
deprecation_msg = 'Model.changes will be removed in warlock v2'
warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
return copy.deepcopy(self.__dict__['changes'])
def validate(self, obj):
"""Apply a JSON schema to an object"""
try:
jsonschema.validate(obj, self.schema)
except jsonschema.ValidationError as exc:
raise exceptions.ValidationError(str(exc))
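# Illustrative usage sketch (not part of the original module): `schema` is looked
# up on the instance, so a concrete model can simply be a subclass carrying a
# JSON schema as a class attribute (names below are made up for the example).
#
#     class Person(Model):
#         schema = {
#             "type": "object",
#             "properties": {"name": {"type": "string"}},
#             "additionalProperties": False,
#         }
#
#     p = Person(name="Ada")
#     p.name = "Grace"        # validated through __setattr__/__setitem__
#     print(p.patch)          # JSON Patch string describing the change
#     p.age = 1               # raises InvalidOperation (schema forbids it)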
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/warlock/model.py
|
Python
|
mit
| 3,998
|
[
"Brian"
] |
5bd51de318674fb1a38c1bd21f358257310a8f9a8b15ba749f9e4d56a9104587
|
import re
import os
import os.path as op
import requests
import psutil
import time
import shutil
import logging
import atexit
import subprocess
import shlex
import six
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SubsMat import MatrixInfo
from . import conf, helper, errors
logger = logging.getLogger(__name__)
CANONICAL_AMINO_ACIDS = 'ARNDCEQGHILKMFPSTWYV'
# %% Sequence tools
def download_uniport_sequence(uniprot_id, output_dir):
"""Download the FASTA sequence for ``uniprot_id`` from UniProt into
``output_dir`` and return the path to the downloaded file (None on failure).
"""
output_file = op.join(output_dir, uniprot_id + '.fasta')
# If the file already exists, do nothing...
if op.isfile(output_file):
logger.debug('Sequence file {} already exists...'.format(output_file))
return output_file
logger.debug('Downloading sequence {}...'.format(uniprot_id + '.fasta'))
address = 'http://www.uniprot.org/uniprot/{}.fasta'.format(uniprot_id)
r = requests.get(address)
if r.status_code == 200:
with open(output_file, 'w') as ofh:
ofh.write(r.text)
return output_file
def convert_basestring_to_seqrecord(sequence, sequence_id='id'):
if any([isinstance(sequence, string_type) for string_type in six.string_types]):
seqrec = SeqRecord(Seq(sequence), id=str(sequence_id))
elif isinstance(sequence, Seq):
seqrec = SeqRecord(sequence, id=str(sequence_id))
elif isinstance(sequence, SeqRecord):
seqrec = sequence
else:
raise Exception("Wrong class type %s for ``sequence``" % str(type(sequence)))
return seqrec
# %%
class Sequence:
"""Class for calculating sequence level features."""
def __init__(self, sequence_file, provean_supset_file=None):
""".
Parameters
----------
sequence_file : str
Full filename of the file containing the protein sequence.
Only fasta format is supported.
provean_supset_file : str
Full path and filename of a precalculated Provean supporting set file.
"""
logger.debug('Initialising a Sequence instance with parameters:')
logger.debug('sequence_file: {}'.format(sequence_file))
logger.debug('provean_supset_file: {}'.format(provean_supset_file))
self.sequence_file = sequence_file
self.seqrecord = SeqIO.read(self.sequence_file, 'fasta')
self.protein_id = helper.slugify(self.seqrecord.id)
self.sequence = str(self.seqrecord.seq)
# Provean supset
if provean_supset_file is not None and provean_supset_file != self.provean_supset_file:
shutil.copy(provean_supset_file, self.provean_supset_file)
shutil.copy(provean_supset_file, self.provean_supset_file + '.fasta')
if not self.provean_supset_exists:
logger.debug('Calculating provean supset...')
self._build_provean_supset()
else:
logger.debug('Provean supset is already calculated!')
self.provean_supset_length = self._get_provean_supset_length()
# Mutations
self.mutations = {}
def mutate(self, mutation):
if mutation in self.mutations:
return self.mutations[mutation]
if mutation[0] != self.sequence[int(mutation[1:-1]) - 1]:
logger.error('sequence: {}'.format(self.sequence))
logger.error('mutation: {}'.format(mutation))
raise errors.MutationMismatchError()
results = dict(
protein_id=self.protein_id,
mutation=mutation,
provean_score=self.run_provean(mutation),
matrix_score=self.score_pairwise(mutation[0], mutation[-1])
)
return results
@property
def provean_supset_file(self):
return op.join(
conf.CONFIGS['sequence_dir'],
helper.slugify(self.protein_id + '_provean_supset')
)
@property
def provean_supset_exists(self):
return (
op.isfile(self.provean_supset_file) and
op.isfile(self.provean_supset_file + '.fasta')
)
@property
def result(self):
result = dict(
protein_id=self.protein_id,
sequence=self.sequence,
sequence_file=op.relpath(self.sequence_file, conf.CONFIGS['unique_temp_dir']),
provean_supset_exists=self.provean_supset_exists,
provean_supset_file=op.relpath(
self.provean_supset_file, conf.CONFIGS['unique_temp_dir']),
provean_supset_length=self.provean_supset_length,
mutations=self.mutations,
)
return result
def _build_provean_supset(self, mutation=None):
"""Build the Provean supporting set for this sequence by running Provean
once, using a dummy mutation if none is given, and saving the set to disk.
"""
logger.debug('Building Provean supporting set. This might take a while...')
atexit.register(_clear_provean_temp)
# Get the required parameters
any_position = 0
while self.sequence[any_position] not in CANONICAL_AMINO_ACIDS:
any_position += 1
first_aa = self.sequence[any_position]
if mutation is None:
mutation = '{0}{1}{0}'.format(first_aa, any_position + 1)
# Run provean
provean_score = self._run_provean(
mutation, save_supporting_set=True, check_mem_usage=True
)
return provean_score
def _get_provean_supset_length(self):
provean_supset_length = 0
with open(self.provean_supset_file) as fh:
for line in fh:
if line and not line.startswith('#'):
provean_supset_length += 1
return provean_supset_length
def run_provean(self, mutation, *args, **kwargs):
n_tries = 0
provean_score = None
while n_tries < 5:
n_tries += 1
try:
provean_score = self._run_provean(mutation, *args, **kwargs)
break
except errors.ProveanError as e:
bad_ids = re.findall("Entry not found in BLAST database: '(.*)'", e.args[0])
provean_supset_data = []
with open(self.provean_supset_file, 'rt') as ifh:
for line in ifh:
if any([(gi_id in line) for gi_id in bad_ids]):
logger.debug(
"Removing line '{}' from the provean supset file..."
.format(line.strip()))
else:
provean_supset_data.append(line)
with open(self.provean_supset_file, 'wt') as ofh:
ofh.writelines(provean_supset_data)
if provean_score is None:
# Recalculate provean supporting set
provean_score = self._build_provean_supset(mutation)
return provean_score
def _run_provean(self, mutation, save_supporting_set=False, check_mem_usage=False):
""".
Provean results look something like this::
#[23:28:34] clustering subject sequences...
#[23:28:34] selecting clusters...
#[23:28:34] 0 subject sequences in 0 clusters were selected for supporting sequences.
#[23:28:34] use the query itself as a supporting sequence
#[23:28:34] loading subject sequences from a FASTA file...
#[23:28:34] scores were computed based on the query sequence itself.
## Number of clusters: 1
## Number of supporting sequences used: 1
#[23:28:34] computing delta alignment scores...
#[23:28:34] printing PROVEAN scores...
### PROVEAN scores ##
## VARIATION SCORE
#M1A -6.000
Parameters
----------
mutation : string
Mutation in sequence coordinates (e.g. 'M1A'), relative to the start of the sequence.
Returns
-------
float
The PROVEAN score parsed from the provean output.
Raises
------
errors.ProveanError
Can raise this exception only if ``check_mem_usage`` is set to ``True``.
"""
if check_mem_usage:
# Get initial measurements of how much virtual memory and disk space is available
disk_space_availible = psutil.disk_usage(
conf.CONFIGS['provean_temp_dir']).free / (1024**3)
logger.debug('Disk space available: {:.2f} GB'.format(disk_space_availible))
if disk_space_availible < 5:
raise errors.ProveanError(
'Not enough disk space ({:.2f} GB) to run provean'
.format(disk_space_availible))
memory_availible = psutil.virtual_memory().available / float(1024)**3
logger.debug('Memory available: {:.2f} GB'.format(memory_availible))
if memory_availible < 0.5:
raise errors.ProveanError(
'Not enough memory ({:.2f} GB) to run provean'
.format(memory_availible))
# Create a file with mutation
mutation_file = op.join(conf.CONFIGS['sequence_dir'], '{}.var'.format(mutation))
with open(mutation_file, 'w') as ofh:
ofh.write(mutation)
# Run provean
system_command = (
"provean " +
" -q '{}' ".format(self.sequence_file) +
" -v '{}' ".format(mutation_file) +
" -d " + op.join(conf.CONFIGS['blast_db_dir'], 'nr') +
" --tmp_dir '{}' ".format(conf.CONFIGS['provean_temp_dir']) +
" --num_threads {} ".format(conf.CONFIGS['n_cores']) +
" --psiblast '{}' ".format(helper.get_which('psiblast')) +
" --blastdbcmd '{}' ".format(helper.get_which('blastdbcmd')) +
" --cdhit '{}' ".format(helper.get_which('cd-hit'))
)
if self.provean_supset_exists:
# use supporting set
system_command += " --supporting_set '{}' ".format(self.provean_supset_file)
else:
system_command += " --save_supporting_set '{}' ".format(self.provean_supset_file)
logger.debug(system_command)
p = subprocess.Popen(
shlex.split(system_command),
cwd=conf.CONFIGS['sequence_dir'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
logger.debug('Parent group id: {}'.format(os.getpgrp()))
child_process_group_id = os.getpgid(p.pid)
logger.debug('Child group id: {}'.format(child_process_group_id))
# Keep an eye on provean to make sure it doesn't do anything crazy
time.sleep(5)
while check_mem_usage and p.poll() is None:
disk_space_availible_now = (
psutil.disk_usage(conf.CONFIGS['provean_temp_dir']).free / float(1024)**3
)
if disk_space_availible_now < 5: # less than 5 GB of free disk space left
raise errors.ProveanResourceError(
'Ran out of disk space and provean had to be terminated ({} GB used)'
.format(disk_space_availible - disk_space_availible_now),
child_process_group_id)
memory_availible_now = (
psutil.virtual_memory().available / float(1024)**3
)
if memory_availible_now < 0.5:
raise errors.ProveanResourceError(
'Ran out of RAM and provean had to be terminated ({} GB left)'
.format(memory_availible - memory_availible_now),
child_process_group_id)
time.sleep(60) # Wait for 1 minute before checking again
# Collect the results and check for errors
stdout, stderr = p.communicate()
stdout = stdout.strip()
stderr = stderr.strip()
logger.debug(stdout)
# Extract provean score from the results message
provean_score = None
result_list = stdout.split('\n')
for i in range(len(result_list)):
if re.findall(r'# VARIATION\s*SCORE', result_list[i]):
provean_score = float(result_list[i + 1].split()[-1])
break
if p.returncode != 0 or provean_score is None:
logger.error('return_code: {}'.format(p.returncode))
logger.error('provean_score: {}'.format(provean_score))
logger.error('error_message: {}'.format(stderr))
raise errors.ProveanError(stderr)
return provean_score
# === Other sequence scores ===
def score_pairwise(self, seq1, seq2, matrix=None, gap_s=None, gap_e=None):
"""Get the BLOSUM (or whatever matrix is given) score."""
matrix = matrix or getattr(MatrixInfo, conf.CONFIGS['matrix_type'])
gap_s = gap_s or conf.CONFIGS['gap_start']
gap_e = gap_e or conf.CONFIGS['gap_extend']
score = 0
gap = False
for i in range(len(seq1)):
pair = (seq1[i], seq2[i])
if not gap:
if '-' in pair:
gap = True
score += gap_s
else:
score += self._score_match(pair, matrix)
else:
if '-' not in pair:
gap = False
score += self._score_match(pair, matrix)
else:
score += gap_e
return score
def _score_match(self, pair_match, matrix_match):
"""Look up the substitution score for a residue pair, falling back to the
reversed pair when the matrix stores only one orientation.
"""
if pair_match not in matrix_match:
return matrix_match[(tuple(reversed(pair_match)))]
else:
return matrix_match[pair_match]
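# Illustrative usage sketch (not part of the original module): mutate() only ever
# scores a single residue pair, so score_pairwise reduces to one matrix lookup.
# Given an already-initialised Sequence instance `seq`, an explicit call could
# look like this (matrix and gap penalties passed directly instead of coming
# from conf.CONFIGS):
#
#     from Bio.SubsMat import MatrixInfo
#     score = seq.score_pairwise('A', 'A', matrix=MatrixInfo.blosum62,
#                                gap_s=-11, gap_e=-1)   # BLOSUM62 identity score
#
# _score_match tries the reversed tuple because MatrixInfo stores each unordered
# residue pair only once.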
def _clear_provean_temp():
provean_temp_dir = conf.CONFIGS['provean_temp_dir']
logger.info("Clearing provean temporary files from '{}'...".format(provean_temp_dir))
for filename in os.listdir(provean_temp_dir):
file_path = os.path.join(provean_temp_dir, filename)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
|
ostrokach/elaspic
|
elaspic/elaspic_sequence.py
|
Python
|
mit
| 14,136
|
[
"BLAST"
] |
cd84ed8123917139cb5989f04f538bffbb992e96c0de1120e0552fb741bb6c77
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom astroid checker for set_trace calls."""
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker, utils
class SetTraceChecker(BaseChecker):
"""Custom astroid checker for set_trace calls."""
__implements__ = IAstroidChecker
name = 'settrace'
msgs = {
'E9101': ('set_trace call found', 'set-trace', None),
}
priority = -1
@utils.check_messages('set-trace')
def visit_call(self, node):
"""Visit a Call node."""
if hasattr(node, 'func'):
infer = utils.safe_infer(node.func)
if infer:
if getattr(node.func, 'name', None) == 'set_trace':
self.add_message('set-trace', node=node)
def register(linter):
"""Register this checker."""
linter.register_checker(SetTraceChecker(linter))
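# Illustrative usage sketch (not part of the original module): pylint picks this
# checker up as a plugin, so assuming the package layout of this repository the
# check can be enabled with something like
#
#     pylint --load-plugins=qute_pylint.settrace somefile.py
#
# after which any call named set_trace is reported as E9101 (set-trace).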
|
lahwaacz/qutebrowser
|
scripts/dev/pylint_checkers/qute_pylint/settrace.py
|
Python
|
gpl-3.0
| 1,643
|
[
"VisIt"
] |
01a172195a0153c5d572206a91d31673db108a42152b0461a08660cb24bd56e9
|
import sys, os, os.path, textwrap, hashlib
from pysamimport import pysam
import re
import inspect
from configparser import SafeConfigParser
from collections import defaultdict
class BadRead(RuntimeError):
def __init__(self):
RuntimeError.__init__(self, self.header)
class IsBadRead(BadRead):
header = "BadRead"
class IsDuplicate(BadRead):
header = "Alignment:IsDuplicate"
class IsQCFail(BadRead):
header = "Alignment:IsQCFail"
class IsSecondary(BadRead):
header = "Alignment:IsSecondary"
class IsUnmapped(BadRead):
header = "Alignment:IsUnmapped"
class TooShort(BadRead):
header = "TooShort"
class TooManyHits(BadRead):
header = "MultipleAlignments"
class BadCigar(BadRead):
header = "BadCIGAROperation"
class IndelAtSNV(BadRead):
header = "QueryIndelAtSNVLocus"
class GapAtSNV(BadRead):
header = "GapInQueryAtSNVLocus"
class SNVPadding(BadRead):
header = "SNVLocusAtEndOfRead"
class SNVEditPadding(BadRead):
header = "SubstitutionNearSNVLocus"
class TooManyEdits(BadRead):
header = "TooManyEdits"
class TooManyEditsOtherThanSNV(BadRead):
header = "TooManyEditsOtherThanSNV"
class TooManyQueryGaps(BadRead):
header = "TooManyQueryGaps"
class MappingQualityTooLow(BadRead):
header = "MappingQualityTooLow"
class BaseQualityTooLow(BadRead):
header = "BaseQualityTooLow"
class OrphanRead(BadRead):
header = "OrphanRead"
class OverlapRead(BadRead):
header = "OverlapRead"
class DuplicateRead(BadRead):
header = "DuplicateRead"
BadRead.allheaders = [cls[1].header for cls in inspect.getmembers(sys.modules[
__name__], lambda member: inspect.isclass(member) and issubclass(member, BadRead) and member != BadRead)]
class OtherError(RuntimeError):
def __init__(self):
RuntimeError.__init__(self, self.msg)
class NoNMTag(OtherError):
msg = "No NM tag provided for alignments, cannot filter based on edit distance."
class NoNHTag(OtherError):
msg = "No NH tag provided for alignments, cannot filter based on number of hits."
class NoMDTag(OtherError):
msg = "No MD tag provided for alignments, cannot filter based on position of edits."
BAM_CMATCH = 0
BAM_CREF_SKIP = 3
class ReadFilter(object):
def pileup_kwargs(self):
return dict(stepper='nofilter',
ignore_overlaps=True,
min_base_quality=0)
def pileup_start(self,pileupcolumn):
pass
def pileup_end(self,pileupcolumn):
pass
@staticmethod
def extract_base_(pileupread):
al = pileupread.alignment
readbase = al.query_sequence[pileupread.query_position]
return al, pileupread.query_position, readbase
class BasicReadFilter(ReadFilter):
NONH = "Warning: Tag NH missing from alignments"
NONM = "Warning: Tag NM missing from alignments"
NOMD = "Warning: Tag MD missing from alignments"
def __init__(self, maxsegments=1, minlength=45,
maxhits=1, maxedits=1, mapq=4,
warnings=set([NONM, NOMD])):
self.minlength = minlength
self.maxsegments = maxsegments
self.maxhits = maxhits
self.maxedits = maxedits
self.warnings = warnings
self.mapq = mapq
if self.warnings == None:
self.warnings = set()
def segments(self, al):
if al.is_duplicate:
raise IsDuplicate()
if al.is_qcfail:
raise IsQCFail()
if al.is_secondary:
raise IsSecondary()
if al.is_unmapped:
raise IsUnmapped()
if al.qlen < self.minlength:
raise TooShort()
if al.mapq < self.mapq:
raise MappingQualityTooLow()
try:
if int(al.opt('NH')) > self.maxhits:
raise TooManyHits()
except KeyError:
if self.NONH in self.warnings:
print(self.NONH + \
".\n Cannot filter out reads that align to multiple loci.", file=sys.stderr)
self.warnings.remove(self.NONH)
if any([t[0] not in (BAM_CMATCH, BAM_CREF_SKIP) for t in al.cigartuples]):
raise BadCigar()
segments = [t[1] for t in al.cigar if t[0] == BAM_CMATCH]
if len(segments) > self.maxsegments:
raise TooManyQueryGaps()
try:
if int(al.get_tag('NM')) > self.maxedits:
raise TooManyEdits()
except KeyError:
if self.NONM in self.warnings:
print(self.NONM + \
".\n Cannot filter out reads with too many substitutions.", file=sys.stderr)
self.warnings.remove(self.NONM)
return segments
class SNVPileupReadFilter(BasicReadFilter):
def __init__(self, minpad=3, minsubstdist=3, maxedits=1, **kw):
kw['maxedits'] = (maxedits + 1)
self.maxedits = maxedits
self.minpad = minpad
self.minsubstdist = minsubstdist
super(SNVPileupReadFilter,self).__init__(**kw)
def findseg(self, pos, segments):
i = 0
while True:
if (pos <= segments[i]):
return i, pos
pos -= segments[i]
i += 1
return None
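# Worked example (illustrative comment, not part of the original module):
# findseg maps a query offset onto the CIGAR match segments, e.g. with
# segments [50, 30] a position of 60 walks past the first 50-base segment and
# returns (1, 10), i.e. 10 bases into the second segment. The padding checks in
# extract_base below are then applied within that single segment.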
def extract_base(self, pileupread):
if pileupread.indel != 0:
raise IndelAtSNV()
if pileupread.is_del:
raise GapAtSNV()
al = pileupread.alignment
segments = self.segments(al)
qpos = pileupread.query_position
seg, qpos = self.findseg(qpos, segments)
if qpos < self.minpad or (segments[seg] - qpos) < self.minpad:
raise SNVPadding()
try:
edits = re.split(r'(\d+)', al.get_tag('MD'))[1:-1]
substs = dict()
reference = None
for i in range(0, len(edits) - 1, 2):
pos = int(edits[i])
substs[pos] = (edits[i + 1], al.query_sequence[pileupread.query_position])
if pos == pileupread.query_position:
reference = edits[i + 1]
elif abs(pos - pileupread.query_position) < self.minsubstdist:
raise SNVEditPadding()
try:
if int(al.get_tag('NM')) > (self.maxedits + (0 if (reference) else 1)):
raise TooManyEditsOtherThanSNV()
except KeyError:
if self.NONM in self.warnings:
print(self.NONM + \
".\n Cannot filter out reference reads with one too many\n substitutions.", file=sys.stderr)
self.warnings.remove(self.NONM)
except KeyError:
if self.NOMD in self.warnings:
print(self.NOMD + \
".\n Cannot filter out reads with edits too close to the SNV locus\n or reference reads with one too many substitutions.", file=sys.stderr)
self.warnings.remove(self.NOMD)
readbase = al.query_sequence[pileupread.query_position]
return al, pileupread.query_position, readbase
class BasicFilter(ReadFilter):
def __init__(self,
skip_duplicate=True,
skip_qcfail=True,
skip_secondary=True,
skip_unmapped=True):
self._skip_duplicate = skip_duplicate
self._skip_qcfail = skip_qcfail
self._skip_secondary = skip_secondary
self._skip_unmapped = skip_unmapped
def extract_base(self, pileupread):
if pileupread.indel != 0:
raise IndelAtSNV()
if pileupread.is_del:
raise GapAtSNV()
al = pileupread.alignment
if self._skip_duplicate and al.is_duplicate:
raise IsDuplicate()
if self._skip_qcfail and al.is_qcfail:
raise IsQCFail()
if self._skip_secondary and al.is_secondary:
raise IsSecondary()
if self._skip_unmapped and al.is_unmapped:
raise IsUnmapped()
readbase = al.query_sequence[pileupread.query_position]
return al, pileupread.query_position, readbase
class BaseQualityFilter(ReadFilter):
def __init__(self, min_base_quality=None):
self._min_base_quality = min_base_quality
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
if self._min_base_quality != None and alignment.query_qualities[query_pos] < self._min_base_quality:
raise BaseQualityTooLow()
return alignment, query_pos, readbase
class MappingQualityFilter(ReadFilter):
def __init__(self, min_mapping_quality=None):
self._min_mapping_quality = min_mapping_quality
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
if self._min_mapping_quality != None and alignment.mapping_quality < self._min_mapping_quality:
raise MappingQualityTooLow()
return alignment, query_pos, readbase
class ReadLengthFilter(ReadFilter):
def __init__(self, min_length=None):
self._min_length = min_length
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
if self._min_length != None and alignment.query_length < self._min_length:
raise TooShort()
return alignment, query_pos, readbase
class EditsFilter(ReadFilter):
def __init__(self, max_edits=None):
self._max_edits = max_edits
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
try:
if self._max_edits != None and int(alignment.get_tag('NM')) > self._max_edits:
raise TooManyEdits()
except KeyError:
raise NoNMTag()
return alignment, query_pos, readbase
class HitsFilter(ReadFilter):
def __init__(self, max_hits=None):
self._max_hits = max_hits
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
try:
if self._max_hits != None and int(alignment.get_tag('NH')) > self._max_hits:
raise TooManyHits()
except KeyError:
raise NoNHTag()
return alignment, query_pos, readbase
class SegmentsFilter(ReadFilter):
def __init__(self, max_segments=None):
self._max_segments = max_segments
@staticmethod
def segments(alignment):
if any([t[0] not in (BAM_CMATCH, BAM_CREF_SKIP) for t in alignment.cigar]):
raise BadCigar()
segments = [t[1] for t in alignment.cigar if t[0] == BAM_CMATCH]
return segments
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
segments = self.segments(alignment)
if self._max_segments != None and len(segments) > self._max_segments:
raise TooManyQueryGaps()
return alignment, query_pos, readbase
class EditPositionFilter(ReadFilter):
def __init__(self, min_edge_dist=None, min_subst_dist=None, max_other_edits=None):
self._min_edge_dist = min_edge_dist
self._min_subst_dist = min_subst_dist
self._max_other_edits = max_other_edits
def findseg(self, pos, segments):
i = 0
while True:
if (pos <= segments[i]):
return i, pos
pos -= segments[i]
i += 1
return None
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
segments = SegmentsFilter.segments(alignment)
seg, qpos = self.findseg(query_pos, segments)
if self._min_edge_dist != None and \
(qpos < self._min_edge_dist or (segments[seg] - qpos) < self._min_edge_dist):
raise SNVEdgeDist()
try:
edits = re.split(r'(\d+)', alignment.get_tag('MD'))[1:-1]
except KeyError:
raise NoMDTag()
reference = False
for i in range(0, len(edits) - 1, 2):
pos = int(edits[i])
if pos == query_pos:
reference = True
elif self._min_subst_dist != None and abs(pos - query_pos) < self._min_subst_dist:
raise SNVOtherEditDist()
try:
if self._max_other_edits != None and \
int(alignment.get_tag('NM')) > self._max_other_edits + (0 if reference else 1):
raise TooManyEditsOtherThanSNV()
except KeyError:
raise NoNMTag()
return alignment, query_pos, readbase
class OrphanFilter(ReadFilter):
def __init__(self, remove=False):
self._remove = remove
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
if self._remove and alignment.is_paired and (not alignment.is_proper_pair):
raise OrphanRead()
return alignment, query_pos, readbase
class OverlapFilter(ReadFilter):
def __init__(self, remove=False):
self._remove = remove
def pileup_start(self,pileupcolumn):
self._seen = dict()
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
if self._remove and \
alignment.is_paired and \
alignment.is_proper_pair and \
alignment.query_name in self._seen:
raise OverlapRead()
self._seen[alignment.query_name] = True
return alignment, query_pos, readbase
class UniqueReads(ReadFilter):
def __init__(self, remove_dups=False):
self._remove = remove_dups
def pileup_start(self,pileupcolumn):
self._seen = set()
def extract_base(self, pileupread):
alignment, query_pos, readbase = self.extract_base_(pileupread)
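# Hash the full read sequence; identical sequences within this pileup column are treated as duplicates.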
seqhash = hashlib.md5(alignment.query_sequence.encode('utf8')).hexdigest().lower()
if self._remove and seqhash in self._seen:
raise DuplicateRead()
self._seen.add(seqhash)
return alignment, query_pos, readbase
class CompoundMethod(object):
def __init__(self):
self._elements = []
self._description = ""
self._specification = []
def set_special_params(self,method,**params):
raise NotImplementedError()
def add_element(self,element):
self._elements.append(element)
def add_desc(self,desc):
self._description = desc
def add_spec(self,spec):
self._specification.append(spec)
def tostr(self):
lines = []
if self._description:
lines.append("Description:")
for line in textwrap.wrap(self._description,50):
lines.append(" "+line)
lines.append("Specification:")
for line in self._specification:
lines.append(" "+line)
return "\n".join(lines)
class CompoundFilter(CompoundMethod,ReadFilter):
def __init__(self):
CompoundMethod.__init__(self)
# Defaults, unlikely to need to be changed...
self._pileup_params = dict(stepper='nofilter',
min_base_quality=0,
ignore_overlaps=True)
def set_special_params(self,method,**params):
assert method == "Pileup"
self._pileup_params = dict(params.items())
def extract_base(self, pileupread):
for f in self._elements:
alignment, query_pos, readbase = f.extract_base(pileupread)
return alignment, query_pos, readbase
def pileup_kwargs(self):
return self._pileup_params
def pileup_start(self,pileupcolumn):
for f in self._elements:
f.pileup_start(pileupcolumn)
def pileup_end(self,pileupcolumn):
for f in self._elements:
f.pileup_end(pileupcolumn)
class ReadGroup(object):
def __init__(self, acceptlist=None, missing=None):
self._default = missing
self.set_acceptlist(acceptlist)
def default(self):
return self._default
def accept(self,value):
if self._acceptlist == None or value in self._acceptlist:
return True
return False
def set_acceptlist(self,acceptlist=None):
self._acceptlist = None
if acceptlist:
try:
self._acceptlist = set(open(acceptlist).read().split())
except IOError:
raise IOError("Can't open read group acceptlist file: %s"%(acceptlist))
def group(self, alignment):
return None
class CompoundGroup(CompoundMethod,ReadGroup):
def group(self, alignment):
grp = None
for rg in self._elements:
grp = rg.group(alignment)
if grp != None:
break
return grp
class MethodFactory(object):
specialMethods = []
def __init__(self):
iniPath = []
progdir = os.path.split(__file__)[0]
iniPath.append(os.path.join(progdir,self.iniFile))
progdir = os.path.split(progdir)[0]
iniPath.append(os.path.join(progdir,self.iniFile))
progdir = os.path.split(progdir)[0]
progdir = os.path.split(progdir)[0]
iniPath.append(os.path.join(progdir,self.iniFile))
iniPath.append(os.path.join(os.path.expanduser("~"),self.iniFile))
iniPath.append(os.path.join(os.path.expanduser("~"),"."+self.iniFile))
iniPath.append(os.path.join(os.getcwd(),self.iniFile))
self.config = SafeConfigParser()
self.config.optionxform = str
self.config.read(iniPath)
if len(self.config.sections()) == 0:
print("Configuration file path:",iniPath,file=sys.stderr)
raise RuntimeError("Can't find configuration file %s for %s"%(self.iniFile,self.__class__.__name__))
def tovalue(self,vstr):
vstr = vstr.strip()
if vstr.startswith('"') and vstr.endswith('"'):
return str(vstr[1:-1])
if vstr.startswith("'") and vstr.endswith("'"):
return str(vstr[1:-1])
if vstr in ('True','False','None'):
v = eval(vstr)
else:
try:
v = str(vstr)
v = float(vstr)
v = int(vstr)
except ValueError:
pass
return v
DESC='Description'
def list(self):
methods = []
for sec in self.config.sections():
if self.config.has_option(sec,self.DESC):
desc = self.config.get(sec,self.DESC)
methods.append((sec,desc))
else:
methods.append((sec,self.defaultDesc%(sec,)))
methods.sort()
return methods
def get(self,name,params=""):
if not self.config.has_section(name):
raise LookupError(self.nomethodError%(name,))
paramsbyopt = defaultdict(str)
for line in map(str.strip,params.split(';')):
if line == "":
continue
opt,rest = line.split(':',1)
opt = opt.strip()
value = rest.strip()
paramsbyopt[opt] = (paramsbyopt[opt] + " " + value).strip()
method = self.compoundMethodClass()
for opt,value in self.config.items(name):
opt = opt.strip()
value = value.strip()
if opt == self.DESC:
method.add_desc(value)
continue
kwargs = dict()
kvpairs = []
if paramsbyopt[opt]:
kvpairs += re.split(r'\s+(\w+)=',' '+paramsbyopt[opt])[1:]
elif paramsbyopt["*"]:
kvpairs += re.split(r'\s+(\w+)=',' '+paramsbyopt["*"])[1:]
kvpairs += re.split(r'\s+(\w+)=',' '+value)[1:]
seenk = set()
for i in range(0,len(kvpairs),2):
k = kvpairs[i]
if k in seenk:
continue
seenk.add(k)
v = self.tovalue(kvpairs[i+1])
vstr = str(v)
if self.tovalue(kvpairs[i+1]) != self.tovalue(vstr):
vstr = '"%s"'%(v,)
if i == 0:
method.add_spec("%s: %s=%s"%(opt,k,vstr))
else:
method.add_spec("%s %s=%s"%(" "*len(opt),k,vstr))
kwargs[k] = v
if len(kvpairs) == 0:
method.add_spec("%s:"%(opt,))
if opt in self.specialMethods:
method.set_special_params(opt,**kwargs)
else:
try:
methodcls = getattr(sys.modules[__name__], opt)
except AttributeError:
raise LookupError(self.noelementError%(opt,name))
if not issubclass(methodcls,self.baseMethodClass):
raise LookupError(self.noelementError%(opt,name))
try:
method.add_element(methodcls(**kwargs))
except TypeError as e:
msg = e.args[0]
msg = msg.replace("__init__()",self.paramError%(opt,name))
e.args = tuple([msg] + list(e.args[1:]))
raise
return method
class ReadFilterFactory(MethodFactory):
baseMethodClass = ReadFilter
compoundMethodClass = CompoundFilter
iniFile = 'filter.ini'
defaultDesc = 'Aligned read filter: %s.'
nomethodError = "Can\'t find named read filter: %s."
noelementError = "Can\'t find element %s of read filter %s."
paramError = "Element %s of read filter %s"
specialMethods = ['Pileup']
class ReadGroupFactory(MethodFactory):
baseMethodClass = ReadGroup
compoundMethodClass = CompoundGroup
iniFile = 'group.ini'
defaultDesc = 'Read groups method: %s.'
nomethodError = "Can\'t find named read group method: %s."
noelementError = "Can\'t find element %s of read group method %s."
paramError = "Element %s of read group method %s"
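# Illustrative usage sketch (not part of the original module; the section name
# "Basic" is hypothetical and must exist as a section of filter.ini):
#   factory = ReadFilterFactory()
#   for name, desc in factory.list():
#       print(name, desc)
#   readfilter = factory.get("Basic", params="BasicFilter: skip_duplicate=True")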
class ReadNameRegex(ReadGroup):
def __init__(self, regex, regexgrp=1, **kw):
super(ReadNameRegex,self).__init__(**kw)
self._regex = re.compile(regex)
self._regexgrp = int(regexgrp)
def group(self, alignment):
name = alignment.query_name
m = self._regex.search(name)
if m:
try:
value = m.group(self._regexgrp)
if self.accept(value):
return value
except IndexError:
pass
return self.default()
class ReadNameWord(ReadGroup):
def __init__(self, field_index, field_sep='_', **kw):
super(ReadNameWord,self).__init__(**kw)
self._index = field_index
self._sep = field_sep
def group(self, alignment):
name = alignment.query_name
words = name.split(self._sep)
try:
value = words[self._index]
if self.accept(value):
return value
except IndexError:
pass
return self.default()
class ReadTagValue(ReadGroup):
def __init__(self, tag, **kw):
super(ReadTagValue,self).__init__(**kw)
self._tag = tag
def group(self, alignment):
try:
value = str(alignment.opt(self._tag))
if self.accept(value):
return value
except KeyError:
pass
return self.default()
class RGTag(ReadTagValue):
def __init__(self,**kw):
kw['tag'] = "RG"
super(RGTag,self).__init__(**kw)
|
HorvathLab/NGS
|
common/src/util.py
|
Python
|
mit
| 23,699
|
[
"pysam"
] |
245db59589656471adc33afe21b5eb211d28d9afc85796a04d8df1e19ab6f84f
|
### Refer to G. Kresse, Phys. Rev. B 73, 045112 (2006)
### for comparison of macroscopic and microscopic dielectric constant
### and absorption peaks.
import os
import sys
import numpy as np
from ase.units import Bohr
from ase.structure import bulk
from ase.parallel import paropen
from gpaw import GPAW, FermiDirac
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull
from gpaw.response.df import DF
if rank != 0:
sys.stdout = devnull
GS = 1
ABS = 1
if GS:
# Ground state calculation
a = 5.431 #10.16 * Bohr
atoms = bulk('Si', 'diamond', a=a)
calc = GPAW(h=0.20,
kpts=(12,12,12),
xc='LDA',
basis='dzp',
txt='si_gs.txt',
nbands=80,
eigensolver='cg',
occupations=FermiDirac(0.001),
convergence={'bands':70})
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('si.gpw','all')
if ABS:
w = np.linspace(0, 24, 481)
q = np.array([0.0, 0.00001, 0.])
# getting macroscopic constant
df = DF(calc='si.gpw', q=q, w=w, eta=0.0001,
hilbert_trans=False, txt='df_1.out',
ecut=150, optical_limit=True)
df.get_macroscopic_dielectric_constant()
df.write('df_1.pckl')
# getting absorption spectrum
df = DF(calc='si.gpw', q=q, w=w, eta=0.1,
ecut=150, optical_limit=True, txt='df_2.out')
df.get_absorption_spectrum(filename='si_abs.dat')
df.check_sum_rule()
df.write('df_2.pckl')
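# A minimal plotting sketch (assumption, not part of the tutorial: si_abs.dat is
# assumed to hold whitespace-separated columns with the frequency grid first and
# the spectra in later columns; check the file header for your GPAW version):
# import numpy as np
# import matplotlib.pyplot as plt
# data = np.loadtxt('si_abs.dat')
# plt.plot(data[:, 0], data[:, 1])
# plt.xlabel('Energy (eV)')
# plt.show()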
|
ajylee/gpaw-rtxs
|
doc/tutorials/dielectric_response/silicon_ABS.py
|
Python
|
gpl-3.0
| 1,536
|
[
"ASE",
"GPAW"
] |
a552bcc37a48c51f431266301d26a01f7f5a9e43498b3b69ad9f8de1cb591cf6
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the empirical null estimator.
"""
import warnings
import numpy as np
from nose.tools import assert_true
from ..empirical_pvalue import (
NormalEmpiricalNull, smoothed_histogram_from_samples, fdr, fdr_threshold,
gaussian_fdr_threshold, gaussian_fdr)
def setup():
# Suppress warnings during tests to reduce noise
warnings.simplefilter("ignore")
def teardown():
# Clear list of warning filters
warnings.resetwarnings()
def test_efdr():
# generate the data
n = 100000
x = np.random.randn(n)
x[:3000] += 3
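# the first 3000 samples are shifted by +3 so they act as true signal among the nulls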
# make the tests
efdr = NormalEmpiricalNull(x)
np.testing.assert_array_less(efdr.fdr(3.0), 0.2)
np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -2.8)
np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -2.5)
def test_smooth_histo():
n = 100
x = np.random.randn(n)
h, c = smoothed_histogram_from_samples(x, normalized=True)
thh = 1. / np.sqrt(2 * np.pi)
hm = h.max()
assert_true(np.absolute(hm - thh) < 0.15)
def test_fdr_pos():
# test with some significant values
np.random.seed([1])
x = np.random.rand(100)
x[:10] *= (.05 / 10)
q = fdr(x)
assert_true((q[:10] < .05).all())
pc = fdr_threshold(x)
assert_true((pc > .0025) & (pc < .1))
def test_fdr_neg():
# test without some significant values
np.random.seed([1])
x = np.random.rand(100) * .8 + .2
q = fdr(x)
assert_true((q > .05).all())
pc = fdr_threshold(x)
assert_true(pc == .05 / 100)
def test_gaussian_fdr():
# Test that fdr works on Gaussian data
np.random.seed([2])
x = np.random.randn(100) * 2
fdr = gaussian_fdr(x)
assert_true(fdr.min() < .05)
assert_true(fdr.max() > .99)
def test_gaussian_fdr_threshold():
np.random.seed([2])
x = np.random.randn(100) * 2
ac = gaussian_fdr_threshold(x)
assert_true(ac > 2.0)
assert_true(ac < 4.0)
assert_true(ac > gaussian_fdr_threshold(x, alpha=.1))
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
bthirion/nipy
|
nipy/algorithms/statistics/tests/test_empirical_pvalue.py
|
Python
|
bsd-3-clause
| 2,172
|
[
"Gaussian"
] |
6f3cf94236d09e1c568fdc3ef8f02a4e4542c6e3c0683ac27bb2561252523290
|
#!/usr/bin/env python
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Aug 3, 2015"
from setuptools import setup, find_packages
import os
if __name__ == "__main__":
setup(
name='fireworks-vasp',
version='0.2',
description='VASP Plugin for fireworks',
long_description='VASP Plugin for fireworks',
url='https://github.com/materialsproject/fireworks',
author="Shyue Ping Ong",
author_email='anubhavster@gmail.com',
license='MIT',
packages=find_packages(),
package_data={'fireworks':['user_objects/queue_adapters/*.txt']},
zip_safe=False,
install_requires=['pymatgen>=2.9.0', 'custodian>=0.7.0',
'fireworks>=0.66'],
classifiers=['Programming Language :: Python :: 2.7',
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Information Technology',
'Operating System :: OS Independent',
'Topic :: Other/Nonlisted Topic',
'Topic :: Scientific/Engineering'],
test_suite='nose.collector',
tests_require=['nose'],
scripts=[os.path.join('scripts', f) for f in os.listdir('scripts')]
)
|
materialsvirtuallab/fireworks-vasp
|
setup.py
|
Python
|
mit
| 1,514
|
[
"VASP",
"pymatgen"
] |
e4d30b6e47c0e5a703fcf161906cc4ba5449c441ce4f80a3b27caec29d66b123
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""priors.py -- This module contains various objects to be used as priors.
When called these return the ln-prior-probability, and they can also be used to
construct prior transforms (for nested sampling) and can be sampled from.
"""
import numpy as np
import scipy.stats
__all__ = ["Prior", "TopHat", "Normal", "ClippedNormal",
"LogNormal", "LogUniform", "Beta",
"StudentT", "SkewNormal"]
class Prior(object):
"""Encapsulate the priors in an object. Each prior should have a
distribution name and optional parameters specifying scale and location
(e.g. min/max or mean/sigma). These can be aliased at instantiation using
the ``parnames`` keyword. When called, the argument should be a variable
and the object should return the ln-prior-probability of that value.
.. code-block:: python
ln_prior_prob = Prior()(value)
Should be able to sample from the prior, and to get the gradient of the
prior at any variable value. Methods should also be available to give a
useful plotting range and, if there are bounds, to return them.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior can
have different parameter names, in case they are being fit for....
"""
def __init__(self, parnames=[], name='', **kwargs):
"""Constructor.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior
can have different parameter names, in case they are being fit for....
"""
if len(parnames) == 0:
parnames = self.prior_params
assert len(parnames) == len(self.prior_params)
self.alias = dict(zip(self.prior_params, parnames))
self.params = {}
self.name = name
self.update(**kwargs)
def __repr__(self):
argstring = ['{}={}'.format(k, v) for k, v in list(self.params.items())]
return '{}({})'.format(self.__class__, ",".join(argstring))
def update(self, **kwargs):
"""Update `params` values using alias.
"""
for k in self.prior_params:
try:
self.params[k] = kwargs[self.alias[k]]
except(KeyError):
pass
# FIXME: Should add a check for unexpected kwargs.
def __len__(self):
"""The length is set by the maximum size of any of the prior_params.
Note that the prior params must therefore be scalar or of the same length as
the maximum size of any of the parameters. This is not checked.
"""
return max([np.size(self.params.get(k, 1)) for k in self.prior_params])
def __call__(self, x, **kwargs):
"""Compute the value of the probability density function at x and
return the ln of that.
:param x:
Value of the parameter, scalar or iterable of same length as the
Prior object.
:param kwargs: optional
All extra keyword arguments are used to update the `prior_params`.
:returns lnp:
The natural log of the prior probability at x, scalar or ndarray of
same length as the prior object.
"""
if len(kwargs) > 0:
self.update(**kwargs)
pdf = self.distribution.pdf
try:
p = pdf(x, *self.args, loc=self.loc, scale=self.scale)
except(ValueError):
# Deal with `x` vectors of shape (nsamples, len(prior))
# for pdfs that don't broadcast nicely.
p = [pdf(_x, *self.args, loc=self.loc, scale=self.scale)
for _x in x]
p = np.array(p)
with np.errstate(invalid='ignore'):
lnp = np.log(p)
return lnp
def sample(self, nsample=None, **kwargs):
"""Draw a sample from the prior distribution.
:param nsample: (optional)
Unused
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.rvs(*self.args, size=len(self),
loc=self.loc, scale=self.scale)
def unit_transform(self, x, **kwargs):
"""Go from a value of the CDF (between 0 and 1) to the corresponding
parameter value.
:param x:
A scalar or vector of same length as the Prior with values between
zero and one corresponding to the value of the CDF.
:returns theta:
The parameter value corresponding to the value of the CDF given by
`x`.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.ppf(x, *self.args,
loc=self.loc, scale=self.scale)
def inverse_unit_transform(self, x, **kwargs):
"""Go from the parameter value to the unit coordinate using the cdf.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.cdf(x, *self.args,
loc=self.loc, scale=self.scale)
def gradient(self, theta):
raise(NotImplementedError)
@property
def loc(self):
"""This should be overridden.
"""
return 0
@property
def scale(self):
"""This should be overridden.
"""
return 1
@property
def args(self):
return []
@property
def range(self):
raise(NotImplementedError)
@property
def bounds(self):
raise(NotImplementedError)
def serialize(self):
raise(NotImplementedError)
class TopHat(Prior):
"""A simple uniform prior, described by two parameters
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.uniform
@property
def scale(self):
return self.params['maxi'] - self.params['mini']
@property
def loc(self):
return self.params['mini']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
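# Minimal usage sketch (illustrative, not part of the original module):
# >>> p = TopHat(mini=0.0, maxi=2.0)
# >>> p(1.0)                   # ln(1 / (maxi - mini)) = ln(0.5)
# >>> p.sample()               # one draw from U(0, 2)
# >>> p.unit_transform(0.25)   # CDF value 0.25 maps back to 0.5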
class Normal(Prior):
"""A simple gaussian prior.
:param mean:
Mean of the distribution
:param sigma:
Standard deviation of the distribution
"""
prior_params = ['mean', 'sigma']
distribution = scipy.stats.norm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
nsig = 4
return (self.params['mean'] - nsig * self.params['sigma'],
self.params['mean'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
#if len(kwargs) > 0:
# self.update(**kwargs)
return (-np.inf, np.inf)
class ClippedNormal(Prior):
"""A Gaussian prior clipped to some range.
:param mean:
Mean of the normal distribution
:param sigma:
Standard deviation of the normal distribution
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mean', 'sigma', 'mini', 'maxi']
distribution = scipy.stats.truncnorm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
@property
def args(self):
a = (self.params['mini'] - self.params['mean']) / self.params['sigma']
b = (self.params['maxi'] - self.params['mean']) / self.params['sigma']
return [a, b]
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogUniform(Prior):
"""Like log-normal, but the distribution of natural log of the variable is
distributed uniformly instead of normally.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.reciprocal
@property
def args(self):
a = self.params['mini']
b = self.params['maxi']
return [a, b]
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class Beta(Prior):
"""A Beta distribution.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
:param alpha:
:param beta:
"""
prior_params = ['mini', 'maxi', 'alpha', 'beta']
distribution = scipy.stats.beta
@property
def scale(self):
return self.params.get('maxi', 1) - self.params.get('mini', 0)
@property
def loc(self):
return self.params.get('mini', 0)
@property
def args(self):
a = self.params['alpha']
b = self.params['beta']
return [a, b]
@property
def range(self):
return (self.params.get('mini',0), self.params.get('maxi',1))
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogNormal(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
Note that ``LogNormal(np.exp(mode) / f) == LogNormal(np.exp(mode) * f)``
and ``f = np.exp(sigma)`` corresponds to "one sigma" from the peak.
:param mode:
Natural log of the variable value at which the probability density is
highest.
:param sigma:
Standard deviation of the distribution of the natural log of the
variable.
"""
prior_params = ['mode', 'sigma']
distribution = scipy.stats.lognorm
@property
def args(self):
return [self.params["sigma"]]
@property
def scale(self):
return np.exp(self.params["mode"] + self.params["sigma"]**2)
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
return (np.exp(self.params['mode'] + (nsig * self.params['sigma'])),
np.exp(self.params['mode'] - (nsig * self.params['sigma'])))
def bounds(self, **kwargs):
return (0, np.inf)
class LogNormalLinpar(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
LogNormal(mode=x, sigma=y) is equivalent to
LogNormalLinpar(mode=np.exp(x), sigma_factor=np.exp(y))
:param mode:
The (linear) value of the variable where the probability density is
highest. Must be > 0.
:param sigma_factor:
The (linear) factor describing the dispersion of the log of the
variable. Must be > 0
"""
prior_params = ['mode', 'sigma_factor']
distribution = scipy.stats.lognorm
@property
def args(self):
return [np.log(self.params["sigma_factor"])]
@property
def scale(self):
k = self.params["sigma_factor"]**np.log(self.params["sigma_factor"])
return self.params["mode"] * k
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
return (self.params['mode'] * (nsig * self.params['sigma_factor']),
self.params['mode'] / (nsig * self.params['sigma_factor']))
def bounds(self, **kwargs):
return (0, np.inf)
class SkewNormal(Prior):
"""A normal distribution including a skew parameter
:param location:
Center (*not* mean, mode, or median) of the distribution.
The center will approach the mean as skew approaches zero.
:param sigma:
Standard deviation of the distribution
:param skew:
Skewness of the distribution
"""
prior_params = ['location', 'sigma', 'skew']
distribution = scipy.stats.skewnorm
@property
def args(self):
return [self.params['skew']]
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['location']
@property
def range(self):
nsig = 4
return (self.params['location'] - nsig * self.params['sigma'],
self.params['location'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
class StudentT(Prior):
"""A Student's T distribution
:param mean:
Mean of the distribution
:param scale:
Size of the distribution, analogous to the standard deviation
:param df:
Number of degrees of freedom
"""
prior_params = ['mean', 'scale', 'df']
distribution = scipy.stats.t
@property
def args(self):
return [self.params['df']]
@property
def scale(self):
return self.params['scale']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
return scipy.stats.t.interval(0.995, self.params['df'], self.params['mean'], self.params['scale'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
|
bd-j/bsfh
|
prospect/models/priors.py
|
Python
|
gpl-2.0
| 13,637
|
[
"Gaussian"
] |
624d54528342ec0eb6b04c479fb250edb71e9e16ab5f99eb039ab2c84ad824c5
|
"""
Plugin architecture for pymatgen.
"""
import pkg_resources
discovered_plugins = {
entry_point.name: entry_point.load() for entry_point in pkg_resources.iter_entry_points("pymatgen.plugins")
}
locals().update(discovered_plugins)
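# Illustrative registration sketch (hypothetical package and module names): a
# plugin package advertises itself through a setuptools entry point, e.g. in its
# setup.py:
#   entry_points={"pymatgen.plugins": ["my_plugin = my_package.my_module"]}
# The loop above then loads it and exposes it here as pymatgen.plugins.my_plugin.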
|
davidwaroquiers/pymatgen
|
pymatgen/plugins/__init__.py
|
Python
|
mit
| 239
|
[
"pymatgen"
] |
5c5c36d7b72295864c4c70f1fb5b188dcfc41da0ab1577f2f6da31878717b289
|
""" PDP
PDP ( PolicyDecisionPoint ) is the back-end for the PolicySystem. It discovers
the policies, finds the best match, evaluates them, merges their results taking
the most penalizing one, computes the set of actions to be triggered and returns
all the information to the PEP which will enforce the actions.
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyCaller import PolicyCaller
from DIRAC.ResourceStatusSystem.PolicySystem.StateMachine import RSSMachine
from DIRAC.ResourceStatusSystem.Utilities import RssConfiguration
from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import getPolicyActionsThatApply, getPoliciesThatApply
__RCSID__ = '$Id$'
class PDP(object):
""" PDP ( Policy Decision Point )
"""
def __init__(self, clients=None):
""" Constructor.
examples:
>>> pdp = PDP( None )
>>> pdp1 = PDP( {} )
>>> pdp2 = PDP( { 'Client1' : Client1Object } )
:Parameters:
**clients** - [ None, `dict` ]
dictionary with Clients to be used in the Commands. If None, the Commands
will create their own clients.
"""
# decision parameters used to match policies and actions
self.decisionParams = None
# Helpers to discover policies and RSS metadata in CS
self.pCaller = PolicyCaller(clients)
# RSS State Machine, used to calculate most penalizing state while merging them
self.rssMachine = RSSMachine('Unknown')
self.log = gLogger.getSubLogger('PDP')
def setup(self, decisionParams=None):
""" method that sanitizes the decisionParams and ensures that at least it has
the keys in `standardParamsDict`. This will be relevant while doing the matching
with the RSS Policies configuration in the CS.
There is one key-value pair, `active` which is added on this method. This allows
policies to be de-activated from the CS, changing their active matchParam to
something else than `Active`.
examples:
>>> pdp.setup( None )
>>> self.decisionParams
{ 'element' : None, 'name' : None, ... }
>>> pdp.setup( { 'element' : 'AnElement' } )
>>> self.decisionParams
{ 'element' : 'AnElement', 'name' : None, ... }
>>> pdp.setup( { 'NonStandardKey' : 'Something' } )
>>> self.decisionParams
{ 'NonStandardKey' : 'Something', 'element' : None,... }
:Parameters:
**decisionParams** - [ None, `dict` ]
dictionary with the parameters to be matched with the RSS Policies configuration
in the CS.
"""
standardParamsDict = {'element': None,
'name': None,
'elementType': None,
'statusType': None,
'status': None,
'reason': None,
'tokenOwner': None,
# Last parameter allows policies to be de-activated
'active': 'Active'}
if decisionParams is not None:
standardParamsDict.update(decisionParams)
if standardParamsDict['element'] is not None:
self.log = gLogger.getSubLogger('PDP/%s' % standardParamsDict['element'])
if standardParamsDict['name'] is not None:
self.log = gLogger.getSubLogger('PDP/%s/%s' % (standardParamsDict['element'], standardParamsDict['name']))
self.log.verbose("Setup - statusType: %s, status: %s" % (standardParamsDict['statusType'],
standardParamsDict['status']))
self.decisionParams = standardParamsDict
def takeDecision(self):
""" main PDP method which does all the work. It first finds all the policies
defined in the CS that match <self.decisionParams> and runs them. Once it has
all the singlePolicyResults, it combines them. Next step is action discovery:
using a similar approach to the one used to discover the policies, but also
taking into account the single policy results and their combined result, finds
the actions to be triggered and returns.
examples:
>>> pdp.takeDecision()['Value'].keys()
['singlePolicyResults', 'policyCombinedResult', 'decisionParams']
>>> pdp.takeDecision()['Value']['singlePolicyResults']
[ { 'Status' : 'Active',
'Reason' : 'blah',
'Policy' : { 'name' : 'AlwaysActiveForResource',
'type' : 'AlwaysActive',
'module' : 'AlwaysActivePolicy',
'description' : 'This is the AlwaysActive policy'
'command' : None,
'args' : {}
}
}, ... ]
>>> pdp.takeDecision()['Value']['policyCombinedResult']
{ 'Status' : 'Active',
'Reason' : 'blah ###',
'PolicyAction' : [ ( 'policyActionName1', 'policyActionType1' ), ... ]
}
:return: S_OK( { 'singlePolicyResults' : `list`,
'policyCombinedResult' : `dict`,
'decisionParams' : `dict` } ) / S_ERROR
"""
if self.decisionParams is None:
return S_OK({'singlePolicyResults': [],
'policyCombinedResult': {},
'decisionParams': self.decisionParams})
self.log.verbose("Taking decision")
# Policies..................................................................
# Get policies that match self.decisionParams
policiesThatApply = getPoliciesThatApply(self.decisionParams)
if not policiesThatApply['OK']:
return policiesThatApply
policiesThatApply = policiesThatApply['Value']
self.log.verbose("Policies that apply: %s" % ', '.join([po['name'] for po in policiesThatApply]))
# Evaluate policies
singlePolicyResults = self._runPolicies(policiesThatApply)
if not singlePolicyResults['OK']:
return singlePolicyResults
singlePolicyResults = singlePolicyResults['Value']
self.log.verbose("Single policy results: %s" % singlePolicyResults)
# Combine policies and get most penalizing status ( see RSSMachine )
policyCombinedResults = self._combineSinglePolicyResults(singlePolicyResults)
if not policyCombinedResults['OK']:
return policyCombinedResults
policyCombinedResults = policyCombinedResults['Value']
self.log.verbose("Combined policy result: %s" % policyCombinedResults)
# Actions...................................................................
policyActionsThatApply = getPolicyActionsThatApply(self.decisionParams,
singlePolicyResults,
policyCombinedResults)
if not policyActionsThatApply['OK']:
return policyActionsThatApply
policyActionsThatApply = policyActionsThatApply['Value']
self.log.verbose("Policy actions that apply: %s" % ','.join(pata[0] for pata in policyActionsThatApply))
policyCombinedResults['PolicyAction'] = policyActionsThatApply
return S_OK({'singlePolicyResults': singlePolicyResults,
'policyCombinedResult': policyCombinedResults,
'decisionParams': self.decisionParams})
def _runPolicies(self, policies):
""" Given a list of policy dictionaries, loads them making use of the PolicyCaller
and evaluates them. This method requires to have run setup previously.
examples:
>>> pdp._runPolicies([])['Value']
[]
>>> policyDict = { 'name' : 'AlwaysActiveResource',
'type' : 'AlwaysActive',
'args' : None,
'description' : 'This is the AlwaysActive policy',
'module' : 'AlwaysActivePolicy',
'command' : None }
>>> pdp._runPolicies([ policyDict, ... ] )['Value']
[ { 'Status' : 'Active', 'Reason' : 'blah', 'Policy' : policyDict }, ... ]
:Parameters:
**policies** - `list( dict )`
list of dictionaries containing the policies selected to be run. Check the
examples to get an idea of how the policy dictionaries look like.
:return: S_OK() / S_ERROR
"""
policyInvocationResults = []
# Gets all valid status for RSS to avoid misconfigured policies returning statuses
# that RSS does not understand.
validStatus = self.rssMachine.getStates()
for policyDict in policies:
# Load and evaluate policy described in <policyDict> for element described
# in <self.decisionParams>
policyInvocationResult = self.pCaller.policyInvocation(self.decisionParams,
policyDict)
if not policyInvocationResult['OK']:
# We should never enter this branch; it only triggers if a policy is
# misconfigured.
_msg = 'runPolicies no OK: %s' % policyInvocationResult
self.log.error(_msg)
return S_ERROR(_msg)
policyInvocationResult = policyInvocationResult['Value']
# Sanity Checks ( they should never happen ! )
if 'Status' not in policyInvocationResult:
_msg = 'runPolicies (no Status): %s' % policyInvocationResult
self.log.error(_msg)
return S_ERROR(_msg)
if not policyInvocationResult['Status'] in validStatus:
_msg = 'runPolicies ( not valid status ) %s' % policyInvocationResult['Status']
self.log.error(_msg)
return S_ERROR(_msg)
if 'Reason' not in policyInvocationResult:
_msg = 'runPolicies (no Reason): %s' % policyInvocationResult
self.log.error(_msg)
return S_ERROR(_msg)
policyInvocationResults.append(policyInvocationResult)
return S_OK(policyInvocationResults)
def _combineSinglePolicyResults(self, singlePolicyRes):
""" method that merges all the policies results into a combined one, which
will be the most penalizing status and the reasons of the single policy
results that returned the same penalizing status. All the rest are ignored.
If there are no single policy results, the `Unknown` state is returned. While
combining policies, the ones containing the option `doNotCombine` are ignored.
examples:
>>> pdp._combineSinglePolicyResults( [] )['Value']
{ 'Status' : 'Unknown', 'Reason' : 'No policy ..' }
>>> pdp._combineSinglePolicyResults( [ { 'Status' : 'Active', 'Reason' : 'blah', 'Policy' : policyDict } ] )
{ 'Status' : 'Active', 'Reason' : 'blah' }
>>> pdp._combineSinglePolicyResults( [ { 'Status' : 'Active', 'Reason' : 'blah', 'Policy' : policyDict },
{ 'Status' : 'Banned', 'Reason' : 'blah 2', 'Policy' : policyDict2 } ] )
{ 'Status' : 'Banned', 'Reason' : 'blah 2' }
>>> pdp._combineSinglePolicyResults( [ { 'Status' : 'Active', 'Reason' : 'blah', 'Policy' : policyDict },
{ 'Status' : 'Active', 'Reason' : 'blah 2', 'Policy' : policyDict2 } ] )
{ 'Status' : 'Active', 'Reason' : 'blah ### blah 2' }
:Parameters:
**singlePolicyRes** - `list( dict )`
list with every single policy result to be combined ( see _runPolicy for more details )
:return: S_OK( dict( Status, Reason ) | S_ERROR
"""
# Dictionary to be returned
policyCombined = {'Status': 'Unknown', # default, it should be overridden by the policies, if they exist
'Reason': ''}
# If there are no policyResults, we return Unknown
if not singlePolicyRes:
policyCombined['Reason'] = 'No policy applies to %(element)s, %(name)s, %(elementType)s' % self.decisionParams
self.log.warn(policyCombined['Reason'])
return S_OK(policyCombined)
# We set the rssMachine on the current state ( ensures it is a valid one )
# FIXME: probably this check can be done at takeDecision
machineStatus = self.rssMachine.setState(self.decisionParams['status'])
if not machineStatus['OK']:
return machineStatus
# Discard all single policy results which belongs to policies that have set
# the option `doNotCombine` in the CS
policiesToCombine = self._findPoliciesToCombine(singlePolicyRes)
# Sort policy results by their statuses, most restrictive first ( lower level first )
self.rssMachine.orderPolicyResults(policiesToCombine)
# As they have been sorted by most restrictive status, the first one is going
# to be our candidate new state. Let's ask the RSSMachine if it allows us to
# make such transition.
candidateState = policiesToCombine[0]['Status']
nextState = self.rssMachine.getNextState(candidateState)
if not nextState['OK']:
return nextState
nextState = nextState['Value']
# If the RssMachine does not accept the candidate, return forcing message
if candidateState != nextState:
policyCombined['Status'] = nextState
policyCombined['Reason'] = 'RssMachine forced status %s to %s' % (candidateState, nextState)
return S_OK(policyCombined)
# If the RssMachine accepts the candidate, just concatenate the reasons
for policyRes in policiesToCombine:
if policyRes['Status'] == nextState:
policyCombined['Reason'] += '%s ###' % policyRes['Reason']
policyCombined['Status'] = nextState
return S_OK(policyCombined)
def _findPoliciesToCombine(self, singlePolicyRes):
""" method that iterates over the single policy results and checks the CS
configuration of the policies looking for the option 'doNotCombine'. If it is
present, that single policy result is discarded.
:Parameters:
**singlePolicyRes** - `list( dict )`
list with every single policy result to be combined ( see _runPolicy for more details )
:return: `list( dict )`
"""
# Get policies configuration from the CS. We want to exclude the policies that
# have set the option `doNotCombine` from this process.
policiesConfiguration = RssConfiguration.getPolicies()
if not policiesConfiguration['OK']:
return policiesConfiguration
policiesConfiguration = policiesConfiguration['Value']
# Function that lets us know if we should combine the result of a single policy
# or not.
def combinePolicy(policyResult):
# Extract policy name from the dictionary returned by PolicyCaller
policyName = policyResult['Policy']['name']
try:
# If doNotCombineResult is defined, the policy is not taken into account
# to create the combined result. However, the single policy result remains
_ = policiesConfiguration[policyName]['doNotCombineResult']
return False
except KeyError:
return True
# Make a list of policies of which we want to merge their results
return [policyResult for policyResult in singlePolicyRes if combinePolicy(policyResult)]
# ...............................................................................
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
fstagni/DIRAC
|
ResourceStatusSystem/PolicySystem/PDP.py
|
Python
|
gpl-3.0
| 15,211
|
[
"DIRAC"
] |
615655dc87fc66fa4ffbf3eda22454219a0f32f72e985eb02e0bd07c08cc84a2
|
# -*- coding: utf-8 -*-
"""
Shortest paths and path lengths using A* ("A star") algorithm.
"""
__author__ =\
"""Salim Fadhley <salimfadhley@gmail.com>
Matteo Dell'Amico <matteodellamico@gmail.com>"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from heapq import heappush, heappop
from networkx import NetworkXError
import networkx as nx
__all__ = ['astar_path','astar_path_length']
def astar_path(G, source, target, heuristic=None):
"""Return a list of nodes in a shortest path between source and target using the A* ("A-star") algorithm.
There may be more than one shortest path. This returns only one.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
from a node to the target. The function takes
two nodes arguments and must return a number.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.astar_path(G,0,4))
[0, 1, 2, 3, 4]
>>> G=nx.grid_graph(dim=[3,3]) # nodes are two-tuples (x,y)
>>> def dist(a, b):
... (x1, y1) = a
... (x2, y2) = b
... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
>>> print(nx.astar_path(G,(0,0),(2,2),dist))
[(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]
See Also
--------
shortest_path(), dijkstra_path()
"""
if G.is_multigraph():
raise NetworkXError("astar_path() not implemented for Multi(Di)Graphs")
if heuristic is None:
# The default heuristic is h=0 - same as Dijkstra's algorithm
def heuristic(u,v):
return 0
# The queue stores priority, node, cost to reach, and parent.
# Uses Python heapq to keep in priority order.
queue = [(0, source, 0, None)]
# Maps enqueued nodes to distance of discovered paths and the
# computed heuristics to target. We avoid computing the heuristics
# more than once and inserting the node into the queue too many times.
enqueued = {}
# Maps explored nodes to parent closest to the source.
explored = {}
while queue:
# Pop the smallest item from queue.
_, curnode, dist, parent = heappop(queue)
if curnode == target:
path = [curnode]
node = parent
while node is not None:
path.append(node)
node = explored[node]
path.reverse()
return path
if curnode in explored:
continue
explored[curnode] = parent
for neighbor, w in G[curnode].items():
if neighbor in explored:
continue
ncost = dist + w.get('weight',1)
if neighbor in enqueued:
qcost, h = enqueued[neighbor]
# if qcost < ncost, a longer path to neighbor remains
# enqueued. Removing it would need to filter the whole
# queue, it's better just to leave it there and ignore
# it when we visit the node a second time.
if qcost <= ncost:
continue
else:
h = heuristic(neighbor, target)
enqueued[neighbor] = ncost, h
heappush(queue, (ncost + h, neighbor, ncost, curnode))
raise NetworkXError("Node %s not reachable from %s"%(source,target))
def astar_path_length(G, source, target, heuristic=None):
"""Return the length of the shortest path between source and target using the A* ("A-star") algorithm.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
from a node to the target. The function takes
two nodes arguments and must return a number.
See Also
--------
astar_path()
"""
# FIXME: warn if G.weighted==False
path=astar_path(G,source,target,heuristic)
return sum(G[u][v].get('weight',1) for u,v in zip(path[:-1],path[1:]))
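# Illustrative usage (edge weights default to 1 when no 'weight' attribute is set):
# >>> G = nx.path_graph(5)
# >>> nx.astar_path_length(G, 0, 4)
# 4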
|
marco-mariotti/selenoprofiles
|
libraries/networkx/algorithms/shortest_paths/astar.py
|
Python
|
gpl-2.0
| 4,301
|
[
"VisIt"
] |
4d53919a6414dcba00cfd92b40985ab0f6f0841b7c0dc1c8abd85bc1539a37a8
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Esteban Tovagliari, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from sys import hexversion as appleseed_python_hexversion
if appleseed_python_hexversion < 0x030000F0:
# Python 2.x
from _appleseedpython import ILogTarget
else:
# Python 3.x
from ._appleseedpython import ILogTarget
class ConsoleLogTarget(ILogTarget):
def __init__(self, stream):
ILogTarget.__init__(self)
self.__stream = stream
def write(self, category, file, line, header, message):
lines = message.split('\n')
for line in lines:
self.__stream.write(header + line + '\n')
class FileLogTarget(ILogTarget):
def __init__(self):
ILogTarget.__init__(self)
self.__file = None
def open(self, filename):
if self.is_open():
self.close()
self.__file = open(filename, "w")
def close(self):
if self.is_open():
self.__file.close()
self.__file = None
def is_open(self):
return self.__file is not None
def write(self, category, file, line, header, message):
if self.is_open():
lines = message.split('\n')
for line in lines:
self.__file.write(header + line + '\n')
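# Illustrative usage sketch (hypothetical file name; only the classes defined above are used):
#   target = FileLogTarget()
#   target.open("render.log")
#   target.write(0, __file__, 0, "[info] ", "first line\nsecond line")
#   target.close()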
|
gospodnetic/appleseed
|
src/appleseed.python/logtarget.py
|
Python
|
mit
| 2,555
|
[
"VisIt"
] |
10cd178b9b86a03edb8df4ea3c17164b305c6da0a70b74873766729c4e86c90d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Esmf(MakefilePackage):
"""The Earth System Modeling Framework (ESMF) is high-performance, flexible
software infrastructure for building and coupling weather, climate, and
related Earth science applications. The ESMF defines an architecture for
composing complex, coupled modeling systems and includes data structures
and utilities for developing individual models."""
homepage = "https://www.earthsystemcog.org/projects/esmf/"
url = 'https://github.com/esmf-org/esmf/archive/ESMF_8_0_1.tar.gz'
version('8.2.0', sha256='3693987aba2c8ae8af67a0e222bea4099a48afe09b8d3d334106f9d7fc311485')
version('8.1.1', sha256='58c2e739356f21a1b32673aa17a713d3c4af9d45d572f4ba9168c357d586dc75')
version('8.0.1', sha256='9172fb73f3fe95c8188d889ee72fdadb4f978b1d969e1d8e401e8d106def1d84')
version('8.0.0', sha256='051dca45f9803d7e415c0ea146df15ce487fb55f0fce18ca61d96d4dba0c8774')
version('7.1.0r', sha256='ae9a5edb8d40ae97a35cbd4bd00b77061f995c77c43d36334dbb95c18b00a889')
variant('mpi', default=True, description='Build with MPI support')
variant('external-lapack', default=False, description='Build with external LAPACK support')
variant('netcdf', default=True, description='Build with NetCDF support')
variant('pnetcdf', default=True, description='Build with pNetCDF support')
variant('xerces', default=True, description='Build with Xerces support')
variant('pio', default=True, description='Enable ParallelIO support')
variant('debug', default=False, description='Make a debuggable version of the library')
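# Illustrative spec (not part of the recipe): an MPI-enabled build with NetCDF
# support could be requested as, e.g., `spack install esmf+mpi+netcdf ^openmpi`.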
# Required dependencies
depends_on('zlib')
depends_on('libxml2')
# Optional dependencies
depends_on('mpi', when='+mpi')
depends_on('lapack@3:', when='+external-lapack')
depends_on('netcdf-c@3.6:', when='+netcdf')
depends_on('netcdf-fortran@3.6:', when='+netcdf')
depends_on('parallel-netcdf@1.2.0:', when='+pnetcdf')
depends_on('xerces-c@3.1.0:', when='+xerces')
# Testing dependencies
depends_on('perl', type='test')
# Make esmf build with newer intel versions
patch('intel.patch', when='@:7.0 %intel@17:')
# Make esmf build with newer gcc versions
# https://sourceforge.net/p/esmf/esmf/ci/3706bf758012daebadef83d6575c477aeff9c89b/
patch('gcc.patch', when='@:7.0 %gcc@6:')
# Fix undefined reference errors with mvapich2
# https://sourceforge.net/p/esmf/esmf/ci/34de0ccf556ba75d35c9687dae5d9f666a1b2a18/
patch('mvapich2.patch', when='@:7.0')
# Allow different directories for creation and
# installation of dynamic libraries on OSX:
patch('darwin_dylib_install_name.patch', when='platform=darwin @:7.0')
# Missing include file for newer gcc compilers
# https://trac.macports.org/ticket/57493
patch('cstddef.patch', when='@7.1.0r %gcc@8:')
# Make script from mvapich2.patch executable
@when('@:7.0')
@run_before('build')
def chmod_scripts(self):
chmod = which('chmod')
chmod('+x', 'scripts/libs.mvapich2f90')
def url_for_version(self, version):
if version < Version('8.0.0'):
return "http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_{0}/esmf_{0}_src.tar.gz".format(version.underscored)
else:
return "https://github.com/esmf-org/esmf/archive/ESMF_{0}.tar.gz".format(version.underscored)
def edit(self, spec, prefix):
# Installation instructions can be found at:
# http://www.earthsystemmodeling.org/esmf_releases/last_built/ESMF_usrdoc/node9.html
# Unset any environment variables that may influence the installation.
for var in os.environ:
if var.startswith('ESMF_'):
os.environ.pop(var)
######################################
# Build and Installation Directories #
######################################
# The environment variable ESMF_DIR must be set to the full pathname
# of the top level ESMF directory before building the framework.
os.environ['ESMF_DIR'] = os.getcwd()
# This variable specifies the prefix of the installation path used
# with the install target.
os.environ['ESMF_INSTALL_PREFIX'] = prefix
# Installation subdirectories default to:
# bin/binO/Linux.gfortran.64.default.default
os.environ['ESMF_INSTALL_BINDIR'] = 'bin'
os.environ['ESMF_INSTALL_LIBDIR'] = 'lib'
os.environ['ESMF_INSTALL_MODDIR'] = 'include'
# Allow compiler flags to carry through from compiler spec
os.environ['ESMF_CXXCOMPILEOPTS'] = \
' '.join(spec.compiler_flags['cxxflags'])
os.environ['ESMF_F90COMPILEOPTS'] = \
' '.join(spec.compiler_flags['fflags'])
# ESMF will simply not build with Intel using backing GCC 8, in that
# case you need to point to something older, below is commented but is
# an example
# os.environ['ESMF_CXXCOMPILEOPTS'] = \
# '-O2 -std=c++11 -gcc-name=/usr/bin/gcc'
# os.environ['ESMF_F90COMPILEOPTS'] = \
# '-O2 -gcc-name=/usr/bin/gcc'
############
# Compiler #
############
# ESMF_COMPILER must be set to select which Fortran and
# C++ compilers are being used to build the ESMF library.
if self.compiler.name == 'gcc':
os.environ['ESMF_COMPILER'] = 'gfortran'
elif self.compiler.name == 'intel':
os.environ['ESMF_COMPILER'] = 'intel'
elif self.compiler.name == 'clang':
os.environ['ESMF_COMPILER'] = 'gfortranclang'
elif self.compiler.name == 'nag':
os.environ['ESMF_COMPILER'] = 'nag'
elif self.compiler.name == 'pgi':
os.environ['ESMF_COMPILER'] = 'pgi'
else:
msg = "The compiler you are building with, "
msg += "'{0}', is not supported by ESMF."
raise InstallError(msg.format(self.compiler.name))
if '+mpi' in spec:
os.environ['ESMF_CXX'] = spec['mpi'].mpicxx
os.environ['ESMF_F90'] = spec['mpi'].mpifc
else:
os.environ['ESMF_CXX'] = os.environ['CXX']
os.environ['ESMF_F90'] = os.environ['FC']
# This environment variable controls the build option.
if '+debug' in spec:
# Build a debuggable version of the library.
os.environ['ESMF_BOPT'] = 'g'
else:
# Build an optimized version of the library.
os.environ['ESMF_BOPT'] = 'O'
if self.spec.satisfies('%gcc@10:'):
os.environ['ESMF_F90COMPILEOPTS'] = '-fallow-argument-mismatch'
#######
# MPI #
#######
# ESMF_COMM must be set to indicate which MPI implementation
# is used to build the ESMF library.
if '+mpi' in spec:
if 'platform=cray' in self.spec:
os.environ['ESMF_COMM'] = 'mpi'
elif '^mvapich2' in spec:
os.environ['ESMF_COMM'] = 'mvapich2'
elif '^mpich' in spec:
# esmf@7.0.1 does not include configs for mpich3,
# so we start with the configs for mpich2:
os.environ['ESMF_COMM'] = 'mpich2'
# The mpich 3 series split apart the Fortran and C bindings,
# so we link the Fortran libraries when building C programs:
os.environ['ESMF_CXXLINKLIBS'] = '-lmpifort'
elif '^openmpi' in spec:
os.environ['ESMF_COMM'] = 'openmpi'
elif '^intel-parallel-studio+mpi' in spec or \
'^intel-mpi' in spec or \
'^intel-oneapi-mpi' in spec:
os.environ['ESMF_COMM'] = 'intelmpi'
else:
# Force use of the single-processor MPI-bypass library.
os.environ['ESMF_COMM'] = 'mpiuni'
##########
# LAPACK #
##########
if '+external-lapack' in spec:
# A system-dependent external LAPACK/BLAS installation is used
# to satisfy the external dependencies of the LAPACK-dependent
# ESMF code.
os.environ['ESMF_LAPACK'] = 'system'
# FIXME: determine whether or not we need to set this
# Specifies the path where the LAPACK library is located.
# os.environ['ESMF_LAPACK_LIBPATH'] = spec['lapack'].prefix.lib
# Specifies the linker directive needed to link the LAPACK library
# to the application.
os.environ['ESMF_LAPACK_LIBS'] = spec['lapack'].libs.link_flags # noqa
else:
os.environ['ESMF_LAPACK'] = 'internal'
##########
# NetCDF #
##########
if '+netcdf' in spec:
# ESMF provides the ability to read Grid and Mesh data in
# NetCDF format.
if spec.satisfies('^netcdf-c@4.2:'):
# ESMF_NETCDF_LIBS will be set to "-lnetcdff -lnetcdf".
# This option is useful for systems which have the Fortran
# and C bindings archived in separate library files.
os.environ['ESMF_NETCDF'] = 'split'
else:
# ESMF_NETCDF_LIBS will be set to "-lnetcdf".
# This option is useful when the Fortran and C bindings
# are archived together in the same library file.
os.environ['ESMF_NETCDF'] = 'standard'
# FIXME: determine whether or not we need to set these.
# ESMF_NETCDF_INCLUDE
# ESMF_NETCDF_LIBPATH
###################
# Parallel-NetCDF #
###################
if '+pnetcdf' in spec:
# ESMF provides the ability to write Mesh weights
# using Parallel-NetCDF.
# When defined, enables the use of Parallel-NetCDF.
# ESMF_PNETCDF_LIBS will be set to "-lpnetcdf".
os.environ['ESMF_PNETCDF'] = 'standard'
# FIXME: determine whether or not we need to set these.
# ESMF_PNETCDF_INCLUDE
# ESMF_PNETCDF_LIBPATH
##############
# ParallelIO #
##############
if '+pio' in spec and '+mpi' in spec:
# ESMF provides the ability to read and write data in both binary
# and NetCDF formats through ParallelIO (PIO), a third-party IO
# software library that is integrated in the ESMF library.
# PIO-dependent features will be enabled and will use the
# PIO library that is included and built with ESMF.
os.environ['ESMF_PIO'] = 'internal'
else:
# Disables PIO-dependent code.
os.environ['ESMF_PIO'] = 'OFF'
##########
# XERCES #
##########
if '+xerces' in spec:
# ESMF provides the ability to read Attribute data in
# XML file format via the XERCES C++ library.
# ESMF_XERCES_LIBS will be set to "-lxerces-c".
os.environ['ESMF_XERCES'] = 'standard'
# FIXME: determine if the following are needed
# ESMF_XERCES_INCLUDE
# ESMF_XERCES_LIBPATH
def check(self):
make('check', parallel=False)
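# A hedged sketch added for clarity (not part of this package): the MPI block
# above maps Spack spec fragments to ESMF_COMM values with an if/elif chain
# that only runs when '+mpi' is in the spec. The same decision can be written
# as a table lookup; the table and helper name below are hypothetical, while
# the spec strings and ESMF_COMM values are the ones used above.
_ESMF_COMM_BY_SPEC = [
    ('platform=cray', 'mpi'),
    ('^mvapich2', 'mvapich2'),
    ('^mpich', 'mpich2'),
    ('^openmpi', 'openmpi'),
    ('^intel-parallel-studio+mpi', 'intelmpi'),
    ('^intel-mpi', 'intelmpi'),
    ('^intel-oneapi-mpi', 'intelmpi'),
]
def _pick_esmf_comm(spec):
    """Return the ESMF_COMM value for the first matching spec fragment."""
    for fragment, comm in _ESMF_COMM_BY_SPEC:
        if fragment in spec:
            return comm
    # Fall back to the single-processor MPI-bypass library.
    return 'mpiuni'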
|
LLNL/spack
|
var/spack/repos/builtin/packages/esmf/package.py
|
Python
|
lgpl-2.1
| 11,522
|
[
"NetCDF"
] |
18565d25e0342d7b1b488132473fa1c272264ee9f60970ede153970bd9789259
|
#!/usr/bin/env python
# This example shows how to construct a surface from a point cloud.
# First we generate a volume using the
# vtkSurfaceReconstructionFilter. The volume values are a distance
# field. Once this is generated, the volume is contoured at a
# distance value of 0.0.
import os
import string
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read some points. Use a programmable filter to read them.
pointSource = vtk.vtkProgrammableSource()
def readPoints():
output = pointSource.GetPolyDataOutput()
points = vtk.vtkPoints()
output.SetPoints(points)
file = open(os.path.normpath(os.path.join(VTK_DATA_ROOT, "Data/cactus.3337.pts")))
line = file.readline()
while line:
data = string.split(line)
if data and data[0] == 'p':
x, y, z = float(data[1]), float(data[2]), float(data[3])
points.InsertNextPoint(x, y, z)
line = file.readline()
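# A hedged alternative added for illustration (this example does not use it):
# the same point-reading logic written without the legacy string module,
# using str.split and a context manager so the file is closed afterwards.
def readPointsModern():
    output = pointSource.GetPolyDataOutput()
    points = vtk.vtkPoints()
    output.SetPoints(points)
    with open(os.path.normpath(os.path.join(VTK_DATA_ROOT, "Data/cactus.3337.pts"))) as f:
        for line in f:
            data = line.split()
            if data and data[0] == 'p':
                points.InsertNextPoint(float(data[1]), float(data[2]),
                                       float(data[3]))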
pointSource.SetExecuteMethod(readPoints)
# Construct the surface and create isosurface.
surf = vtk.vtkSurfaceReconstructionFilter()
surf.SetInputConnection(pointSource.GetOutputPort())
cf = vtk.vtkContourFilter()
cf.SetInputConnection(surf.GetOutputPort())
cf.SetValue(0, 0.0)
# Sometimes the contouring algorithm can create a volume whose gradient
# vector and ordering of polygon (using the right hand rule) are
# inconsistent. vtkReverseSense cures this problem.
reverse = vtk.vtkReverseSense()
reverse.SetInputConnection(cf.GetOutputPort())
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
map = vtk.vtkPolyDataMapper()
map.SetInputConnection(reverse.GetOutputPort())
map.ScalarVisibilityOff()
surfaceActor = vtk.vtkActor()
surfaceActor.SetMapper(map)
surfaceActor.GetProperty().SetDiffuseColor(1.0000, 0.3882, 0.2784)
surfaceActor.GetProperty().SetSpecularColor(1, 1, 1)
surfaceActor.GetProperty().SetSpecular(.4)
surfaceActor.GetProperty().SetSpecularPower(50)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(surfaceActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
ren.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren.GetActiveCamera().SetPosition(1, 0, 0)
ren.GetActiveCamera().SetViewUp(0, 0, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(20)
ren.GetActiveCamera().Elevation(30)
ren.GetActiveCamera().Dolly(1.2)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
|
naucoin/VTKSlicerWidgets
|
Examples/Modelling/Python/reconstructSurface.py
|
Python
|
bsd-3-clause
| 2,614
|
[
"VTK"
] |
ab5df1545e698a5af1d8903d51a6963e79adea15ffd108c1d3462fce02ef318d
|
import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
    returns the workday immediately before the nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
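# A small, hedged illustration of the observance helpers above (added for
# clarity; the function below is hypothetical and is not called anywhere).
# July 4, 2021 fell on a Sunday and December 25, 2021 on a Saturday, so the
# helpers shift them to the adjacent workdays.
def _observance_examples():
    assert nearest_workday(datetime(2021, 7, 4)) == datetime(2021, 7, 5)
    assert next_monday(datetime(2021, 7, 4)) == datetime(2021, 7, 5)
    assert nearest_workday(datetime(2021, 12, 25)) == datetime(2021, 12, 24)
    assert previous_friday(datetime(2021, 12, 25)) == datetime(2021, 12, 24)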
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
            Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
        days_of_week:
            provide a tuple of days e.g. (0, 1, 2, 3) for Monday through Thursday
            Monday=0, ..., Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday),
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year={year}, '.format(year=self.year)
info += 'month={mon}, day={day}, '.format(mon=self.month, day=self.day)
if self.offset is not None:
info += 'offset={offset}'.format(offset=self.offset)
if self.observance is not None:
info += 'observance={obs}'.format(obs=self.observance)
repr = 'Holiday: {name} ({info})'.format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
        Return reference dates for the holiday, including the year prior to
        start_date and the year following end_date. This ensures that any
        offsets to be applied will still yield holidays within the passed-in
        dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = DatetimeIndex(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings(record=True):
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
    except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(
cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Returns the holidays observed between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar {name} does not have any '
'rules specified'.format(name=self.name))
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
        will take precedence over other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
        except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
        except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
            If True set rules to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
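# A hedged usage sketch added for illustration (not part of the original
# module; the function and the 'ExampleCalendar' name are hypothetical):
# instantiate the federal calendar defined above, list its observed holidays
# for one year, and derive a merged calendar with HolidayCalendarFactory.
def _example_usage():
    cal = USFederalHolidayCalendar()
    # DatetimeIndex of observed federal holidays in 2020.
    federal_2020 = cal.holidays(start='2020-01-01', end='2020-12-31')
    # Same range, but as a Series mapping observed dates to holiday names.
    named_2020 = cal.holidays(start='2020-01-01', end='2020-12-31',
                              return_name=True)
    # Merge the federal rules with Good Friday into a new calendar class.
    ExampleCalendar = HolidayCalendarFactory('ExampleCalendar',
                                             USFederalHolidayCalendar,
                                             GoodFriday)
    return federal_2020, named_2020, ExampleCalendar().holidays()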
|
zfrenchee/pandas
|
pandas/tseries/holiday.py
|
Python
|
bsd-3-clause
| 16,264
|
[
"COLUMBUS"
] |
43e95709c0dd4dfb51174e3f46c636c5439669ab0a632237b77cba362136015a
|
# Standard Python modules
# =======================
import os
import copy
# External modules
# ================
from PyQt5.QtCore import pyqtSignal, pyqtProperty
from PyFoam.Basics.STLFile import STLFile
from PyFoam.Error import FatalErrorPyFoamException
# DICE modules
# ============
from dice.dice_extras.vis import STLLoader
from dice.dice_extras.tools import json_sync
from dice.foam_app import FoamApp
# App modules
# ============
from .modules.surfaceCheck import SurfaceCheck
from .modules.surfaceOrient import SurfaceOrient
from .modules.surfaceTransformPoints import SurfaceTransformPoints
from .modules.surfaceAutoPatch import SurfaceAutoPatch
from .modules.surfaceMeshInfo import SurfaceMeshInfo
from .modules.surfaceCoarsen import SurfaceCoarsen
from .modules.surfaceRefineRedGreen import SurfaceRefineRedGreen
from .modules.surfaceSplitByPatch import SurfaceSplitByPatch
class STL_Import(FoamApp, SurfaceCheck, SurfaceOrient,
SurfaceTransformPoints, SurfaceAutoPatch,
SurfaceMeshInfo, SurfaceCoarsen, SurfaceRefineRedGreen,
SurfaceSplitByPatch):
app_name = "STL Import"
output_types = ["stl_files"]
def __init__(self, parent, instance_name, status):
FoamApp.__init__(self, parent, instance_name, status)
SurfaceOrient.__init__(self)
SurfaceTransformPoints.__init__(self)
SurfaceAutoPatch.__init__(self)
SurfaceMeshInfo.__init__(self)
SurfaceCoarsen.__init__(self)
self.__history = []
self.__object_files = []
self.__stl_files = []
self.__stl_vis_objects = []
def load(self):
self.__history = json_sync.JsonList(self.config_path("history.json"))
self.__object_files = json_sync.JsonList(self.config_path("files.json"))
self.__stl_files = self.__load_stl_files()
self.__load_stl_vis_objects()
self.status = self.FINISHED
def run(self):
self.repeat_history()
return True
def __load_stl_files(self):
files = []
for f in self.__object_files:
stl_file = STLFile(self.current_run_path(f['filePath']))
try:
stl_file.patchInfo()
files.append(stl_file)
except FatalErrorPyFoamException:
pass
return files
def __load_stl_vis_objects(self):
stl_vo_file_names = [stl_file.file_name for stl_file in self.__stl_vis_objects]
for stl_file in self.__stl_files:
if stl_file._filename not in stl_vo_file_names:
stl_vo = STLLoader(stl_file)
self.__stl_vis_objects.append(stl_vo)
self.add_vis_object(stl_vo)
history_changed = pyqtSignal(name="historyChanged")
@pyqtProperty("QVariantList", notify=history_changed)
def history(self):
return self.__decorated_history()
def __decorated_history(self):
hist = self.__history.to_simple_list()
for i in range(len(hist)):
cmd = hist[i]['cmd'] if 'cmd' in hist[i] else ''
method = getattr(self, "run_"+cmd) if cmd != '' else None
hist[i]['doc'] = method.__doc__ if method is not None else cmd
hist[i]['parameterStr'] = str(dict(hist[i]['parameters'])) if 'parameters' in hist[i] else ''
return hist
def add_cmd_to_history(self, cmd, parameters):
"""
Adds a command and its parameters at the end of the history list.
:param cmd:
:param parameters:
:return:
"""
if self.config["recordHistory"]:
self.__history.append({"cmd": cmd, "parameters": parameters})
self.history_changed.emit()
def remove_last_history_entry(self):
"""
Removes the last command from the history and tries to undo it.
If undoing fails, the command stays in the history.
"""
entry = self.__history.pop()
if not self.__try_undo_command(entry):
self.__history.append(entry)
else:
self.history_changed.emit() # changed signal only needed when undoing worked
def __try_undo_command(self, command):
"""
Tries to undo a command by calling the function with a prepended "undo_".
If no such function exists, the whole history is repeated.
:param str command: The command that should be undone.
:return: bool: True when undoing worked, False otherwise
"""
if 'cmd' in command:
method = getattr(self, 'undo_'+command['cmd'], None)
if method is not None:
parameters = command['parameters'] if 'parameters' in command else {}
method(**parameters)
else:
return self.repeat_history()
else:
self.alert("cannot undo "+str(command))
return False
return True
def clear_history(self):
del self.__history[:]
self.history_changed.emit()
def repeat_history(self):
self.__object_files.clear() # clear file model before rerunning, or the files will show up multiple times
for item in self.__history:
if 'cmd' in item:
cmd = "run_"+item['cmd']
try:
method = getattr(self, cmd)
parameters = item['parameters'] if 'parameters' in item else {}
method(**parameters)
except AttributeError:
self.alert("cannot execute command: "+cmd)
return False
return True
def update_changed_stl_file(self, filename):
for stl in list(self.__stl_files):
if stl.filename() == filename:
self.__stl_files.remove(stl)
self.__stl_files.append(STLFile(self.current_run_path("files", filename)))
self.object_files_changed.emit()
self.stl_files_out_changed.emit()
for vis in list(self.__stl_vis_objects):
if vis.basename == filename:
self.__stl_vis_objects.remove(vis)
self.remove_vis_object(vis)
self.__load_stl_vis_objects()
object_files_changed = pyqtSignal(name="objectFilesChanged")
@property
def object_files(self):
return [{'text': stl_file.filename(),
'filePath': "files/{0}".format(stl_file.filename()),
'elements':
[{'text': region['name'], 'filePath': "files/{0}".format(stl_file.filename())}
for region in stl_file.patchInfo()]}
for stl_file in self.__stl_files
]
objectFiles = pyqtProperty("QVariantList", fget=object_files.fget, notify=object_files_changed)
def add_to_file_model(self, full_path, src, file_path):
"""
Adds a stl file to the file model.
:param full_path: The full path to the stl file
:param src: The path of the original file
:param file_path: relative file path, must start with "files/"
"""
stl_file = STLFile(full_path)
self.__stl_files.append(stl_file)
vo = STLLoader(stl_file)
self.__stl_vis_objects.append(vo)
self.add_vis_object(vo)
self.__object_files.append({'src': src, 'filePath': file_path})
self.object_files_changed.emit()
self.stl_files_out_changed.emit()
def __remove_stl_by_file_path(self, file_path):
stl_file_name = self.current_run_path(file_path)
stl_file_to_remove = next((stl_file for stl_file in self.__stl_files
if stl_file._filename == stl_file_name), None)
stl_vo_to_remove = next((stl_vo for stl_vo in self.__stl_vis_objects
if stl_vo.file_name == stl_file_name), None)
if stl_file_to_remove is not None:
self.__stl_files.remove(stl_file_to_remove)
if stl_vo_to_remove is not None:
self.remove_vis_object(stl_vo_to_remove)
self.__stl_vis_objects.remove(stl_vo_to_remove)
self.stl_files_out_changed.emit()
def remove_from_file_model(self, file_path):
"""
Removes a file from the file model. file_path should start with "files/".
:param file_path:
"""
for f in self.__object_files:
if f['filePath'] == file_path:
self.__object_files.remove(f)
self.object_files_changed.emit()
self.__remove_stl_by_file_path(file_path)
return
def remove_file_by_path(self, path):
filename = path[0]
file_path = os.path.join("files", filename)
src = next((f['src'] for f in self.__object_files if f['filePath'] == file_path), '')
self.delete_object_file(filename, src, file_path)
def import_files(self, urls):
for url in urls:
url = self.parse_url(url)
file_path = self.config_path("files")
file_name = os.path.basename(url)
if self.check_if_already_imported(file_path, file_name):
self.add_cmd_to_history('copy_object_file', {'path': url})
self.run_copy_object_file(url)
else:
self.alert("Could not import "+str(url))
def run_copy_object_file(self, path):
"""
Copies an object file (stl) into the app and run folder.
:param path: file path to copy from
:return:
"""
self.copy(path, self.config_path("files"))
self.copy(path, self.current_run_path("files"))
filename = os.path.basename(path)
self.add_to_file_model(self.current_run_path("files", filename), src=path,
file_path=os.path.join("files", filename))
    def check_if_already_imported(self, path, file_name):
        """Return True if file_name does not yet exist below path (i.e. not imported yet)."""
        file_path = os.path.join(path, file_name)
        return not os.path.exists(file_path)
def undo_copy_object_file(self, path):
filename = os.path.basename(path)
self.rm(self.config_path("files", filename))
self.rm(self.current_run_path("files", filename))
self.remove_from_file_model(os.path.join("files", filename))
def delete_object_file(self, filename, src, path):
self.add_cmd_to_history("delete_object_file", {'filename': filename, 'src': src, 'path': path})
self.run_delete_object_file(filename, src, path)
def run_delete_object_file(self, filename, src, path):
"""
Deletes a file from the app and run folder.
:param filename: file to delete
:return:
"""
self.undo_copy_object_file(src)
def undo_delete_object_file(self, filename, src, path):
self.run_copy_object_file(src)
def open_paraview(self):
paraview = self.dice.settings.value(self, ['ParaView', 'paraview'])
current_foam_path = self.current_run_path("view.foam")
self.run_process([paraview, current_foam_path])
def stl_files_out(self):
return copy.deepcopy(self.__stl_files)
stl_files_out_changed = pyqtSignal()
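# A minimal, self-contained sketch added for illustration (it is not part of
# this app, and every name below is hypothetical): the history mechanism used
# above records commands by name, replays them via getattr(self, "run_" + cmd)
# as in repeat_history, and undoes them through an optional "undo_" + cmd
# method as in remove_last_history_entry.
class _HistorySketch:
    def __init__(self):
        self.history = []
        self.items = []
    def do(self, cmd, **parameters):
        # Record the command, then dispatch to its run_ method.
        self.history.append({"cmd": cmd, "parameters": parameters})
        getattr(self, "run_" + cmd)(**parameters)
    def undo_last(self):
        # Pop the last command and call its undo_ method if one exists.
        entry = self.history.pop()
        method = getattr(self, "undo_" + entry["cmd"], None)
        if method is not None:
            method(**entry["parameters"])
    def run_add_item(self, name):
        self.items.append(name)
    def undo_add_item(self, name):
        self.items.remove(name)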
|
Leopardob/dice-dev
|
apps/OpenFOAM_230/Preprocessing/STL_Import/app.py
|
Python
|
gpl-3.0
| 11,118
|
[
"ParaView"
] |
00684f2d3c0b5dfe863e39f488f910d3c9b935bcca4ea352b7c09c609f494ecd
|