| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
from __future__ import division, print_function, absolute_import
import sys
from random import randint, random, shuffle
from turtle import TurtleScreen, RawTurtle, TK
from contextlib import contextmanager
if sys.version_info[0] < 3:
from Tkinter import Tk, mainloop
else:
from tkinter import Tk
mainloop = False
DEBUG = False
def noisy(value, variance=0.01):
"""Add a small amount of noise to a value.
TODO: use proper gaussian noise"""
size = value * variance
return value + (random() * size * 2) - size
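# A gaussian variant, sketched here for the TODO above; the name and the
# proportional sigma are illustrative assumptions, and nothing in this
# module calls it.
def noisy_gauss(value, sigma_frac=0.01):
    from random import gauss
    return value + gauss(0, abs(value) * sigma_frac)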
@contextmanager
def disable_turtle(t):
    down = t.isdown()
    if down:
        t.penup()
    try:
        yield
    finally:
        # restore the pen even if the body raised
        if down:
            t.pendown()
def wrap(turtle, half_width, half_height):
"""Provide an pac-man style wrap-around world"""
x, y = turtle.pos()
new_x = new_y = None
if x > half_width:
new_x = x - half_width * 2
elif x < -half_width:
new_x = x + half_width * 2
if y > half_height:
new_y = y - half_height * 2
elif y < -half_height:
new_y = y + half_height * 2
with disable_turtle(turtle):
if new_x is not None:
turtle.setx(new_x)
if new_y is not None:
turtle.sety(new_y)
def clamp(turtle, half_width, half_height):
"""Clamp turtle to window"""
x, y = turtle.pos()
new_x = new_y = None
if x > half_width:
new_x = half_width
elif x < -half_width:
new_x = -half_width
if y > half_height:
new_y = half_height
elif y < -half_height:
new_y = -half_height
with disable_turtle(turtle):
if new_x is not None:
turtle.setx(new_x)
if new_y is not None:
turtle.sety(new_y)
class TurtleWorld(object):
def __init__(self, width, height, borders=wrap, title="TurtlePower"):
self.width = width
self.half_width = width // 2
self.height = height
self.half_height = height // 2
self.borders = borders
self.window_title = title
self.init_screen()
self.fps = 0
self.done = True
self.turtles = []
def init_screen(self):
# initialise screen and turn off auto-render
root = Tk()
root.wm_title(self.window_title)
window = TK.Canvas(master=root, width=self.width, height=self.height)
window.pack()
self.screen = TurtleScreen(window)
self.screen.tracer(0, 0)
def position_turtle(self, t, pos=None, angle=None):
# move to location
t.hideturtle()
t.penup()
if pos is None:
pos = (randint(-self.half_width, self.half_width),
randint(-self.half_height, self.half_height))
x, y = pos
t.goto(x, y)
if angle is None:
angle = random() * 360
t.setheading(angle)
# ready to go
t.showturtle()
t.pendown()
return t
def random_position(self, turtle):
return self.position_turtle(turtle)
def _print_fps(self): # pragma: no cover
if not self.done:
print(self.fps)
self.screen.ontimer(self._print_fps, 1000)
self.fps = 0
def create_turtle(self, callback, pos=None, angle=None):
t = PowerTurtle(self)
t.set_callback(callback)
self.position_turtle(t, pos, angle)
self.add_turtle(t)
return t
def add_turtle(self, turtle):
turtle.clear()
self.turtles.append(turtle)
def remove_turtle(self, turtle):
turtle.hideturtle()
turtle.clear()
self.turtles.remove(turtle)
def run(self, ticks=1000):
# run for the given number of ticks (default 1000)
self.done = False
if DEBUG:
self.screen.ontimer(self._print_fps, 1000)
self.ticks = ticks
self.screen.ontimer(self.tick, 33)
self.screen.update()
if mainloop:
mainloop()
else:
self.screen.mainloop()
def tick(self):
shuffle(self.turtles)
for t in self.turtles:
t._do_callback()
self.borders(t, self.half_width, self.half_height)
self.screen.update()
self.fps += 1
self.ticks -= 1
if self.ticks == 0:
self.done = True
else:
self.screen.ontimer(self.tick, 33)
class PowerTurtleMixin(object):
"""An extension of the basic turtle class.
Provides some extra methods to make things easy, plus senses and
integration with TurtleWorld"""
# this string can be used as a simple type check in user code,
# rather than having to know about isinstance
type = "turtle"
def __init__(self, world, *args, **kwargs):
self.world = world
# pass arbitrary args to specific Turtle base class
super(PowerTurtleMixin, self).__init__(**kwargs)
self.setup()
def setup(self):
"""User-defined initialisation function, called once"""
def callback(self):
"""User-defined act function"""
def set_callback(self, callback):
"""Set the callback to a function, for classless usage"""
self.callback = lambda: callback(self)
def turn_towards(self, desired, amount):
"""Helper to to turn a small amount towards a heading"""
heading = self.heading()
angle = desired - heading
angle = (angle + 180) % 360 - 180
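# the (x + 180) % 360 - 180 trick folds the difference into [-180, 180),
# so we always turn the short way round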
if angle >= 0:
amount = min(amount, angle)
else:
amount = max(-amount, angle)
self.left(amount)
return amount
def get_neighbours(self, distance=60, angle=120):
    """Other turtles you can see that are within distance and angle of your
    current heading"""
    neighbours = []
    for t in self.world.turtles:
        if t is not self:
            # fold the heading difference into [-180, 180) so that, e.g.,
            # a raw difference of 350 degrees counts as 10
            a = abs((self.heading() - self.towards(t) + 180) % 360 - 180)
            if a < angle and self.distance(t) < distance:
                neighbours.append(t)
    return neighbours
# Framework methods
def _do_callback(self):
"""Framework-level callback. Designed to be overridden in subclasses.
Can be used to provide extra functionality without affecting the
user-defined callback() function."""
self.callback()
class PowerTurtle(PowerTurtleMixin, RawTurtle):
"""PowerTurtle based on stdlib turtle module"""
def __init__(self, world):
super(PowerTurtle, self).__init__(world, canvas=world.screen)
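# Minimal usage sketch (illustrative; not part of the original module):
# a 400x400 wrap-around world with one turtle that just walks forward.
if __name__ == '__main__':
    demo_world = TurtleWorld(400, 400)
    demo_world.create_turtle(lambda t: t.forward(2))
    demo_world.run(ticks=300)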
| AllTheWayDown/turtlepower | turtlepower/world.py | Python | mit | 6,489 | ["Gaussian"] | bdfa75073ea2fa53be21d4d575e435e86dacdb0e7299c0751a61d6fb234f383c |
#!/usr/bin/env python
"""Python script to run cell model"""
"""
/* Copyright (c) 2015 EPFL-BBP, All rights reserved.
THIS SOFTWARE IS PROVIDED BY THE BLUE BRAIN PROJECT ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE BLUE BRAIN PROJECT
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This work is licensed under a
Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode or send a letter to
Creative Commons, 171 Second Street, Suite 300,
San Francisco, California, 94105, USA.
"""
"""
* @file run.py
* @brief Run simulation using pyneuron
* @author Werner Van Geit @ BBP
* @date 2015
"""
# pylint: disable=C0325, W0212, F0401, W0612
import os
import neuron
import numpy
import sys
def create_cell():
"""Create the cell model"""
# Load morphology
neuron.h.load_file("morphology.hoc")
# Load biophysics
neuron.h.load_file("biophysics.hoc")
# Load main cell template
neuron.h.load_file("template.hoc")
# Instantiate the cell from the template
print("Loading cell cADpyr232_L5_TTPC1_0fb1ca4724")
cell = neuron.h.cADpyr232_L5_TTPC1_0fb1ca4724(0)
return cell
def create_stimuli(cell, stim_start, stim_end, current_amplitude):
"""Create the stimuli"""
print('Attaching stimulus electrodes')
stimuli = []
iclamp = neuron.h.IClamp(0.5, sec=cell.soma[0])
iclamp.delay = stim_start
iclamp.dur = stim_end - stim_start
iclamp.amp = current_amplitude
print('Setting up step current clamp: '
'amp=%f nA, delay=%f ms, duration=%f ms' %
(iclamp.amp, iclamp.delay, iclamp.dur))
stimuli.append(iclamp)
return stimuli
def create_recordings(cell):
"""Create the recordings"""
print('Attaching recording electrodes')
recordings = {}
recordings['time'] = neuron.h.Vector()
recordings['soma(0.5)'] = neuron.h.Vector()
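# the second argument to record() is the sampling interval in ms
# (0.1 ms here, i.e. 10 kHz)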
recordings['time'].record(neuron.h._ref_t, 0.1)
recordings['soma(0.5)'].record(cell.soma[0](0.5)._ref_v, 0.1)
return recordings
def run_RmpRiTau_step(
stim_start,
stim_end,
current_amplitude,
plot_traces=None):
"""Run """
cell = create_cell()
stimuli = create_stimuli(cell, stim_start, stim_end, current_amplitude) # noqa
recordings = create_recordings(cell)
# Override the default simulation duration
neuron.h.tstop = stim_end + stim_start
print(
'Setting simulation time to %.6g ms for the step current' %
neuron.h.tstop)
print('Setting initial voltage to -70 mV')
neuron.h.v_init = -70
neuron.h.stdinit()
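# Settle the model to steady state before recording: take ten huge
# 1000 ms steps starting from t = -1e9, then rewind t to 0 and restore
# the fine integration step.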
neuron.h.dt = 1000
neuron.h.t = -1e9
for _ in range(10):
neuron.h.fadvance()
neuron.h.t = 0
neuron.h.dt = 0.025
neuron.h.frecord_init()
neuron.h.continuerun(3000)
time = numpy.array(recordings['time'])
soma_voltage = numpy.array(recordings['soma(0.5)'])
recordings_dir = 'python_recordings'
soma_voltage_filename = os.path.join(
recordings_dir,
'soma_voltage_RmpRiTau_step.dat')
numpy.savetxt(soma_voltage_filename, numpy.column_stack((time, soma_voltage)))
print('Soma voltage for RmpRiTau trace saved to: %s'
% (soma_voltage_filename))
if plot_traces:
import pylab
pylab.figure(facecolor='white')
pylab.plot(recordings['time'], recordings['soma(0.5)'])
pylab.xlabel('time (ms)')
pylab.ylabel('Vm (mV)')
pylab.gcf().canvas.set_window_title('RmpRiTau trace')
return time, soma_voltage, stim_start, stim_end
def init_simulation():
"""Initialise simulation environment"""
neuron.h.load_file("stdrun.hoc")
neuron.h.load_file("import3d.hoc")
print('Loading constants')
neuron.h.load_file('constants.hoc')
def analyse_RmpRiTau_trace(
time,
soma_voltage,
stim_start,
stim_end,
current_amplitude):
"""Analyse the output of the RmpRiTau protocol"""
# Import the eFeature Extraction Library
import efel
# Prepare the trace data
trace = {}
trace['T'] = time
trace['V'] = soma_voltage
trace['stim_start'] = [stim_start]
trace['stim_end'] = [stim_end]
# Calculate the necessary eFeatures
efel_results = efel.getFeatureValues(
[trace],
['voltage_base', 'steady_state_voltage_stimend',
'decay_time_constant_after_stim'])
voltage_base = efel_results[0]['voltage_base'][0]
ss_voltage = efel_results[0]['steady_state_voltage_stimend'][0]
dct = efel_results[0]['decay_time_constant_after_stim'][0]
# Calculate input resistance
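# Ohm's law; with NEURON's units (mV / nA) this yields MOhm directly,
# so no unit conversion is needed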
input_resistance = float(ss_voltage - voltage_base) / current_amplitude
rmpritau_dict = {}
rmpritau_dict['Rmp'] = '%.6g' % voltage_base
rmpritau_dict['Rmp_Units'] = 'mV'
rmpritau_dict['Rin'] = '%.6g' % input_resistance
rmpritau_dict['Rin_Units'] = 'MOhm'
rmpritau_dict['Tau'] = '%.6g' % dct
rmpritau_dict['Tau_Units'] = 'ms'
print('Resting membrane potential is %s %s' %
(rmpritau_dict['Rmp'], rmpritau_dict['Rmp_Units']))
print('Input resistance is %s %s' %
(rmpritau_dict['Rin'], rmpritau_dict['Rin_Units']))
print('Time constant is %s %s' %
(rmpritau_dict['Tau'], rmpritau_dict['Tau_Units']))
import json
with open('rmp_ri_tau.json', 'w') as rmpritau_json_file:
json.dump(rmpritau_dict, rmpritau_json_file,
sort_keys=True,
indent=4,
separators=(',', ': '))
def main(plot_traces=False):
"""Main"""
# Import matplotlib to plot the traces
if plot_traces:
import matplotlib
matplotlib.rcParams['path.simplify'] = False
init_simulation()
current_amplitude = -0.01
stim_start = 1000
stim_end = 2000
time, soma_voltage, stim_start, stim_end = run_RmpRiTau_step(
stim_start, stim_end, current_amplitude, plot_traces=plot_traces)
analyse_RmpRiTau_trace(
time,
soma_voltage,
stim_start,
stim_end,
current_amplitude)
if plot_traces:
import pylab
pylab.show()
if __name__ == '__main__':
if len(sys.argv) == 1:
main(plot_traces=True)
elif len(sys.argv) == 2 and sys.argv[1] == '--no-plots':
main(plot_traces=False)
else:
raise Exception(
"Script only accepts one argument: --no-plots, not %s" %
str(sys.argv))
| cigani/NEST | L5_TTPC1_cADpyr232_1/run_RmpRiTau.py | Python | mit | 7,100 | ["NEURON", "VisIt"] | 5a624841d76bf75ff8b219cefcb6c527597beceec46ab4889fb85910ba0e05c0 |
__author__ = 'Ahmed Hani Ibrahim'
import abc
class LearningAlgorithm(object):
# __metaclass__ = abc.ABCMeta
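# NOTE: with the metaclass line left commented out, @abc.abstractmethod
# below is not actually enforced at instantiation time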
@abc.abstractmethod
def learn(self, learningRate, input, output, network):
"""
:param learningRate: double
:param input: list
:param output: list
:param network: [[Neuron]]
:return: [[Neuron]]
"""
return
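# A hypothetical concrete subclass, sketched for illustration only; the
# class name and trivial body are assumptions, not part of this API.
class ExampleLearningAlgorithm(LearningAlgorithm):
    def learn(self, learningRate, input, output, network):
        # a real implementation would adjust each neuron's weights here
        # and return the updated network
        return network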
| AhmedHani/Python-Neural-Networks-API | OptimizationAlgorithms/LearningAlgorithm.py | Python | mit | 390 | ["NEURON"] | afaa744f25ff5037c8e86530d6df47d563d32c007a14f3d5094a2415094cb0d4 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 01:26:31 2015
Plot the rms of 'gradient' of two intensity edges/vectors shifted by a lag k
@author: sweel
"""
import matplotlib.pyplot as plt
import os
import cPickle as pickle
import seaborn as sns
import pandas as pd
from collections import defaultdict
from tubule_het.autoCor.lagsfun import iterlagspd
sns.set_context("talk")
plt.close('all')
def sclminmax(data):
"""return a scaled min max collection of vtk cellarray data
"""
vtkscaled = defaultdict(dict)
for key in data.keys():
flat = [el for lis in data[key] for el in lis]
fmax = max(flat)
fmin = min(flat)
vtkscaled[key] = []
for line in data[key]:
vtkscaled[key].append([(el - fmin) /
(fmax - fmin) for el in line])
return vtkscaled
# =============================================================================
# Data input
# =============================================================================
# pylint: disable=C0103
dirlist = []
# pylint: disable=C0103
for root, dirs, files in os.walk(os.getcwd()):
for f in dirs:
if f.startswith('YP'):
dirlist.append(
os.path.join(root, f))
DYL = pd.DataFrame()
SHL = pd.DataFrame()
DNL = pd.DataFrame()
DUL = pd.DataFrame()
for media in dirlist[:]:
labs = media[-3:]
print('\nNow on %s' % labs + '\n' + '=' * 79)
# make sure the pkl file below exists, run MakeInputForLags.py otherwise
with open('%s_lagsRFP.pkl' % labs, 'rb') as inpt:
(randNDY, randUDY, Norm, NormPermute, data) = pickle.load(inpt)
randNDY = sclminmax(randNDY)
randUDY = sclminmax(randUDY)
Norm = sclminmax(Norm)
NormPermute = sclminmax(NormPermute)
dfDY = pd.DataFrame({cells: pd.Series([edge for edge
in Norm[cells]]) for cells
in Norm.keys()})
dfShf = pd.DataFrame({cells: pd.Series([edge for edge
in NormPermute[cells]]) for cells
in Norm.keys()})
dfU = pd.DataFrame({cells: pd.Series([edge for edge
in randUDY[cells]]) for cells
in Norm.keys()})
dfN = pd.DataFrame({cells: pd.Series([edge for edge
in randNDY[cells]]) for cells
in Norm.keys()})
cols1 = dfDY.columns
cols2 = dfShf.columns
cols3 = dfU.columns
cols4 = dfN.columns
pdDY = iterlagspd(dfDY, cols1, labs)
print('DY complete')
pdSh = iterlagspd(dfShf, cols2, labs)
print('Shuffle complete')
pdU = iterlagspd(dfU, cols3, labs)
print('Uniform complete')
pdN = iterlagspd(dfN, cols4, labs)
print('Normal complete')
DYL = DYL.append(pdDY, ignore_index=True)
SHL = SHL.append(pdSh, ignore_index=True)
DUL = DUL.append(pdU, ignore_index=True)
DNL = DNL.append(pdN, ignore_index=True)
DYL['type'] = r'$\Delta \Psi$ expt.'
SHL['type'] = 'Shuffled'
DNL['type'] = 'Normal Dist.'
DUL['type'] = 'Uniform Dist.'
BIG = pd.concat([DYL, SHL, DUL, DNL], ignore_index=True)
A = pd.melt(BIG,
id_vars=['cat', 'type'],
var_name='lags/k',
value_name='F(k)')
MASK = A['type'] == r'$\Delta \Psi$ expt.'
B = A[MASK]
with sns.plotting_context('talk', font_scale=1.25):
FIG1 = sns.factorplot(x='lags/k',
y='F(k)',
col='type',
hue='cat',
data=A,
col_wrap=2,
ci=99,
scale=.65)
plt.show()
sns.set(style='white')
plt.figure()
with sns.plotting_context('talk', font_scale=1.25):
FIG2 = sns.pointplot(x='lags/k',
y='F(k)',
hue='cat',
data=B,
ci=99)
sns.despine(top=True, right=True)
FIG2.set_ylabel('F(k)')
plt.show()
| moosekaka/sweepython | tubule_het/rfp_analysis/lagsplotrfp.py | Python | mit | 4,064 | ["VTK"] | f6b05fb88dc598ced1b87aeb3e69b79154de8a22aaeb5a4306f9dc765d60490f |
#!/usr/bin/python
# Author: Ary Pablo Batista <arypbatista@gmail.com>
"""
This file is part of JSGobstones.
JSGobstones is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
JSGobstones is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with JSGobstones. If not, see <http://www.gnu.org/licenses/>.
"""
import os, os.path
import sys
class CmdApplication(object):
def initialize(self):
self.input_key = _Getch()
self.options = None
self.arguments = None
def usage(self):
return "No usage info.\n"
def get_option_handlers(self):
return {}
def get_option_switches(self):
return self.get_option_handlers().keys()
def error(self, message):
sys.stderr.write(message)
def get_option_name(self, option_switch):
return remove_no(normalize_option_name(option_switch.split(" ")[0]))
def start(self):
self.initialize()
self.arguments, self.options = get_options(self.get_option_switches())
options_processed = []
for option_switch, option_handler in self.get_option_handlers().iteritems():
option = self.get_option_name(option_switch)
if self.options[option] and not option_handler is None:
if isinstance(self.options[option], list):
option_handler(*self.options[option])
else:
option_handler()
options_processed += [option]
if len(options_processed) == 0:
self.error(self.usage().replace('<CMDLINE>', sys.argv[0]))
sys.exit(1)
def append_file(filename, content):
f = open(filename, 'a')
f.write(content)
f.close()
def write_file(filename, content):
f = open(filename, 'w')
f.write(content)
f.close()
def delete_file(filename):
os.unlink(filename)
def read_file(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content
## Parser for option switches
def remove_no(option_name):
if option_name[:3] == "no-":
return option_name[3:]
else:
return option_name
def normalize_option_name(option_name):
if option_name[:2] == "--":
return option_name[2:]
else:
return option_name
def get_options(option_switches):
return parse_options(option_switches, sys.argv)
def default_options(option_switches):
opt = {}
for o in option_switches:
o = o.split(' ')
sw = o[0]
if sw[:3] == 'no-':
neg = True
sw = sw[3:]
else:
neg = False
if len(o) == 1:
opt[sw] = neg
else:
opt[sw] = []
return opt
def get_option_names(option_switches):
return [option.split(' ')[0] for option in option_switches]
def parse_options(option_switches, args, max_args=None):
arguments = []
option_names = get_option_names(option_switches)
switches = [normalize_option_name(s) for s in option_switches]
args = map(lambda arg: normalize_option_name(arg) if arg in option_names else arg, args)
opt = default_options(switches)
i = 1
n = len(args)
# all args
while i < len(args):
o = None
# select matching option
for oi in switches:
oi = oi.split(' ')
if oi[0] == args[i]:
o = oi
break
# This is an argument, not an option
if o is None:
if len(arguments) == max_args:
return False
arguments.append(args[i])
i += 1
continue
# Check if single-word option
# sw = current option
sw = o[0]
if len(o) == 1:
if sw[:3] == 'no-':
neg = True
sw = sw[3:]
else:
neg = False
opt[sw] = not neg
i += 1
# If has parameters
else:
k = 1
i += 1
while k < len(o):
if i >= n: return False
opt[sw].append(args[i])
i += 1
k += 1
return arguments, opt
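# Contract example (illustrative values): with option_switches
# ["--purge", "--main X"] and args ["prog", "--main", "foo.ts"], this
# returns ([], {'purge': False, 'main': ['foo.ts']}).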
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
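# the import above only probes that tty support is available when this
# Unix fallback is constructed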
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
import fnmatch
import os, shutil
def get_files_matching(directory, matching):
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, matching):
matches.append(os.path.join(root, filename))
return matches
THIS_DIR = os.path.dirname(__file__)
BIN_DIR = os.path.join(THIS_DIR, "public", "jsgobstones")
class RunTypescript(CmdApplication):
def get_option_handlers(self):
return {
"--main X" : self.build,
"--purge" : self.purge,
"--no-build-html" : None,
"--no-clean" : None,
"--no-build-parser": None,
"--install" : self.install
}
def install(self):
print "Installing JSGobstones"
os.system("npm install")
os.system("bower install")
os.system("sudo gem install sass")
print "Done"
def purge(self):
print "The Purge begins!"
if os.path.exists(BIN_DIR):
shutil.rmtree(BIN_DIR)
print "Ready"
def build(self, mainfile, target="ES5"):
print "Compiling %s" % (mainfile,)
if self.options["build-parser"]:
print "Compiling parser"
os.system("pegjs -e \"var parser\" ./src/parser/GobstonesTranspiler.pegjs ./src/parser/GobstonesTranspiler.js".replace("\./", THIS_DIR))
print "Building macro's file"
os.system(os.path.join(THIS_DIR, "src", "compiler", "GenerateMacrosFile.py"))
output = os.path.join(BIN_DIR, os.path.basename(mainfile)[:-3] + ".js")
build_cmd = "tsc --sourcemap --target %s --out %s %s" % (target, output, mainfile)
if self.options["clean"]:
if os.path.exists(BIN_DIR):
shutil.rmtree(BIN_DIR)
os.mkdir(BIN_DIR)
os.system(build_cmd)
jsfiles = get_files_matching(os.path.dirname(mainfile), "*.js")
jsfiles = filter(lambda f: mainfile[:-3] + ".js" not in f and "/gui/" not in f, jsfiles)
for f in jsfiles:
os.system("mv %s %s" % (f, BIN_DIR))
#os.system("mv %s %s" % (mainfile[:-3]+".js", BIN_DIR))
os.system("gulp")
print "Ready"
RunTypescript().start()
| lalo73/JSGobstones | compile.py | Python | gpl-3.0 | 7,657 | ["GULP"] | 1dd9543bea83f209ce70349c6baf02a9270142a28b770b40f6a37a2b7ca5d5a3 |
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2009, 2010 OpenHatch, Inc.
# Copyright (C) 2010 Jessica McKellar
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mysite.base.tests import make_twill_url, TwillTests
import mysite.base.models
import mysite.base.unicode_sanity
import mysite.account.tests
from mysite.profile.models import Person
import mysite.profile.models
import mysite.search.controllers
from mysite.search.models import Project, Bug, \
ProjectInvolvementQuestion, Answer, BugAlert
from mysite.search import views
import datetime
import mysite.project.views
import mysite.customs.bugtrackers
import hashlib
import simplejson
import mock
from twill import commands as tc
from django.test import TestCase
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.files.base import ContentFile
from django.contrib.auth.models import User
from django.db.models import Q
import MySQLdb
class SearchTest(TwillTests):
def search_via_twill(self, query=None):
search_url = "http://openhatch.org/search/"
if query:
search_url += '?q=%s' % query
tc.go(make_twill_url(search_url))
def search_via_client(self, query=None):
search_url = "/search/"
return self.client.get(search_url, {u'q': query})
def compare_lists(self, one, two):
self.assertEqual(len(one), len(two))
self.assertEqual(set(one), set(two))
def compare_lists_of_dicts(self, one, two, sort_key=None):
if sort_key is not None:
sort_fn = lambda thing: thing[sort_key]
else:
sort_fn = None
sorted_one = sorted(one, key=sort_fn)
sorted_two = sorted(two, key=sort_fn)
self.assertEqual(len(sorted_one), len(sorted_two))
for k in range(len(sorted_one)):
    try:
        self.assertEqual(sorted_one[k], sorted_two[k])
    except AssertionError:
        import sys
        print >> sys.stderr, sorted_one
        print >> sys.stderr, sorted_two
        raise
class AutoCompleteTests(SearchTest):
"""
Test whether the autocomplete can handle
- a field-specific query
- a non-field-specific (fulltext) query
"""
def setUp(self):
SearchTest.setUp(self)
self.project_chat = Project.create_dummy(name=u'ComicChat', language=u'C++')
self.project_kazaa = Project.create_dummy(name=u'Kazaa', language=u'Vogon')
self.bug_in_chat = Bug.all_bugs.create(project=self.project_chat,
people_involved=2,
date_reported=datetime.date(2009, 4, 1),
last_touched=datetime.date(2009, 4, 2),
last_polled=datetime.date(2009, 4, 2),
submitter_realname="Zaphod Beeblebrox",
submitter_username="zb",
canonical_bug_link="http://example.com/",
)
def testSuggestionsMinimallyWorks(self):
suggestions = views.get_autocompletion_suggestions(u'')
self.assert_("lang:Vogon" in suggestions)
def testSuggestForAllFields(self):
c_suggestions = views.get_autocompletion_suggestions(u'C')
self.assert_(u'lang:C++' in c_suggestions)
self.assert_(u'project:ComicChat' in c_suggestions)
def testQueryNotFieldSpecificFindProject(self):
c_suggestions = views.get_autocompletion_suggestions(u'Comi')
self.assert_(u'project:ComicChat' in c_suggestions)
def testQueryFieldSpecific(self):
lang_C_suggestions = views.get_autocompletion_suggestions(
u'lang:C')
self.assert_(u'lang:C++' in lang_C_suggestions)
self.assert_(u'lang:Python' not in lang_C_suggestions)
self.assert_(u'project:ComicChat' not in lang_C_suggestions)
def testSuggestsCorrectStringsFormattedForJQueryAutocompletePlugin(self):
suggestions_list = views.get_autocompletion_suggestions(u'')
suggestions_string = views.list_to_jquery_autocompletion_format(
suggestions_list)
suggestions_list_reconstructed = suggestions_string.split("\n")
self.assert_("project:ComicChat" in suggestions_list_reconstructed)
self.assert_("lang:Vogon" in suggestions_list_reconstructed)
self.assert_("lang:C++" in suggestions_list_reconstructed)
def testSuggestsSomethingOverHttp(self):
response = self.client.get( u'/search/get_suggestions', {u'q': u'C'})
self.assertContains(response, "project:ComicChat\nlang:C++")
def testSuggesterFailsOnEmptyString(self):
response = self.client.get( u'/search/get_suggestions', {u'q': u''})
self.assertEquals(response.status_code, 500)
def testSuggesterFailsWithImproperQueryString(self):
response = self.client.get( u'/search/get_suggestions', {})
self.assertEquals(response.status_code, 500)
class SearchResultsSpecificBugs(SearchTest):
fixtures = ['short_list_of_bugs.json']
def setUp(self):
SearchTest.setUp(self)
query = u'PYTHON'
# The four canonical_filters by which a bug can match a query
whole_word = "[[:<:]]%s[[:>:]]" % query
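# [[:<:]] and [[:>:]] are MySQL word-boundary markers, so the iregex
# filters below match the query only as a whole word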
self.canonical_filters = [
Q(project__language__iexact=query),
Q(title__iregex=whole_word),
Q(project__name__iregex=whole_word),
Q(description__iregex=whole_word)
]
def no_canonical_filters(self, except_one=Q()):
"""Returns the complex filter, 'Matches no canonical filters except the specified one.'"""
# Create the complex filter, 'Matches no canonical filters,
# except perhaps the specified one.'
other_c_filters = Q() # Initial value
for cf in self.canonical_filters:
if cf != except_one:
other_c_filters = other_c_filters | cf
# Read this as "Just that one filter and no others."
return except_one & ~other_c_filters
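# e.g. no_canonical_filters(except_one=self.canonical_filters[0]) keeps only
# bugs whose project language matches the query and which satisfy none of
# the other three filters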
def test_that_fixture_works_properly(self):
"""Is the fixture is wired correctly?
To test the search engine, I've loaded the fixture with six
'canonical' bugs. Four of them are canonical matches, two
are canonical non-matches. A working search engine will return
just the matches.
There are four ways a bug can match a query:
- project language = the query
- project name contains the query
- project title contains the query
- project description contains the query
Let's call each of these a 'canonical filter'.
For each of these canonical filters, there should be a canonical bug
in the fixture that matches that, and only that, filter.
This test checks the fixture for the existence of these
canonical bugs.
If the fixture is wired correctly, when we search it for
'python', it will return bugs 1, 2, 3 and 4, and exclude
bugs 400 and 401. """
# Remember, a canonical bug meets ONLY ONE criterion.
# Assert there's just one canonical bug per criterion.
for cf in self.canonical_filters:
matches = Bug.all_bugs.filter(self.no_canonical_filters(except_one=cf))[:]
self.failUnlessEqual(len(matches), 1,
"There are %d, not 1, canonical bug(s) for the filter %s" % (len(matches), cf))
# Assert there's at least one canonical nonmatch.
canonical_non_matches = Bug.all_bugs.filter(self.no_canonical_filters())
self.assert_(len(canonical_non_matches) > 1)
def test_search_single_query(self):
"""Test that Query.get_bugs_unordered()
produces the expected results."""
response = self.client.get(u'/search/', {u'q': u'python'})
returned_bugs = response.context[0][u'bunch_of_bugs']
for cf in self.canonical_filters:
self.failUnless(Bug.all_bugs.filter(cf)[0] in returned_bugs,
"Search engine did not correctly use the filter %s" % cf)
for bug in Bug.all_bugs.filter(self.no_canonical_filters()):
self.failIf(bug in returned_bugs, "Search engine returned a false positive: %s." % bug)
def test_search_two_queries(self):
title_of_bug_to_include = u'An interesting title'
title_of_bug_to_exclude = "This shouldn't be in the results for [pyt*hon 'An interesting description']."
# If either of these bugs aren't there, then this test won't work properly.
self.assert_(len(list(Bug.all_bugs.filter(title=title_of_bug_to_include))) == 1)
self.assert_(len(list(Bug.all_bugs.filter(title=title_of_bug_to_exclude))) == 1)
response = self.client.get(u'/search/',
{u'q': u'python "An interesting description"'})
included_the_right_bug = False
excluded_the_wrong_bug = True
for bug in response.context[0][u'bunch_of_bugs']:
if bug.title == title_of_bug_to_include:
included_the_right_bug = True
if bug.title == title_of_bug_to_exclude:
excluded_the_wrong_bug = False
self.assert_(included_the_right_bug)
self.assert_(excluded_the_wrong_bug)
class TestThatQueryTokenizesRespectingQuotationMarks(TwillTests):
def test(self):
difficult = "With spaces (and parens)"
query = mysite.search.controllers.Query.create_from_GET_data({u'q': u'"%s"' % difficult})
self.assertEqual(query.terms, [difficult])
# Make there be a bug to find
project = Project.create_dummy(name=difficult)
Bug.create_dummy(project=project)
# How many bugs?
num_bugs = query.get_bugs_unordered().count()
self.assertEqual(num_bugs, 1)
class SearchResults(TwillTests):
fixtures = [u'bugs-for-two-projects.json']
def test_query_object_is_false_when_no_terms_or_facets(self):
query = mysite.search.controllers.Query.create_from_GET_data({})
self.assertFalse(query)
def test_show_no_bugs_if_no_query(self):
# Call up search page with no query.
response = self.client.get(u'/search/')
# The variable u'bunch_of_bugs', passed to the template, is a blank list.
self.assertEqual(response.context[0][u'bunch_of_bugs'], [])
def test_json_view(self):
tc.go(make_twill_url(u'http://openhatch.org/search/?format=json&jsoncallback=callback&q=python'))
response = tc.show()
self.assert_(response.startswith(u'callback'))
json_string_with_parens = response.split(u'callback', 1)[1]
self.assert_(json_string_with_parens[0] == u'(')
self.assert_(json_string_with_parens[-1] == u')')
json_string = json_string_with_parens[1:-1]
objects = simplejson.loads(json_string)
self.assert_(u'pk' in objects[0][u'bugs'][0])
def testPagination(self):
url = u'http://openhatch.org/search/'
tc.go(make_twill_url(url))
tc.fv(u'search_opps', u'q', u'python')
tc.submit()
# Grab descriptions of first 10 Exaile bugs
bugs = Bug.all_bugs.filter(project__name=
u'Exaile').order_by(u'-last_touched')[:10]
for bug in bugs:
tc.find(bug.description)
# Hit the next button
tc.follow(u'Next')
# Grab descriptions of next 10 Exaile bugs
bugs = Bug.all_bugs.filter(project__name=
u'Exaile').order_by(u'-last_touched')[10:20]
for bug in bugs:
tc.find(bug.description)
def testPaginationAndChangingSearchQuery(self):
url = u'http://openhatch.org/search/'
tc.go(make_twill_url(url))
tc.fv(u'search_opps', u'q', u'python')
tc.submit()
# Grab descriptions of first 10 Exaile bugs
bugs = Bug.all_bugs.filter(project__name=
u'Exaile').order_by(u'-last_touched')[:10]
for bug in bugs:
tc.find(bug.description)
# Hit the next button
tc.follow(u'Next')
# Grab descriptions of next 10 Exaile bugs
bugs = Bug.all_bugs.filter(project__name=
u'Exaile').order_by(u'-last_touched')[10:20]
for bug in bugs:
tc.find(bug.description)
# Now, change the query - do we stay that paginated?
tc.fv(u'search_opps', u'q', u'c#')
tc.submit()
# Grab descriptions of first 10 GNOME-Do bugs
bugs = Bug.all_bugs.filter(project__name=
u'GNOME-Do').order_by(
u'-last_touched')[:10]
for bug in bugs:
tc.find(bug.description)
class Recommend(SearchTest):
fixtures = ['user-paulproteus.json',
'person-paulproteus.json',
'cchost-data-imported-from-ohloh.json',
'bugs-for-two-projects.json',
'extra-fake-cchost-related-citations.json',
'tags']
# FIXME: Add a 'recommend_these_in_bug_search' field to TagType
# Use that to exclude 'will never understand' tags from recommended search terms.
@mock.patch('mysite.search.controllers.Query.get_or_create_cached_hit_count')
def test_get_recommended_search_terms_for_user(self, mocked_hit_counter):
# Make all the search terms appear to return results, so
# that none are excluded when we try to trim away
# the terms that don't return results.
# We test this functionality separately in
# search.tests.DontRecommendFutileSearchTerms.
mocked_hit_counter.return_value = 1
person = Person.objects.get(user__username='paulproteus')
recommended_terms = person.get_recommended_search_terms()
# By 'source' I mean a source of recommendations.
source2terms = {
'languages in citations': ['Automake', 'C#', 'C++', 'Make',
'Python', 'shell script', 'XUL'],
'projects in citations': ['Mozilla Firefox'],
'tags': ['algol', 'symbolist poetry', 'rails', 'chinese chess']
}
for source, terms in source2terms.items():
for term in terms:
self.assert_(term in recommended_terms,
"Expected %s in recommended search terms "
"inspired by %s." % (term, source))
# FIXME: Include recommendations from tags.
@mock.patch('mysite.search.controllers.Query.get_or_create_cached_hit_count')
def test_search_page_context_includes_recommendations(self, mocked_hit_counter):
# Make all the search terms appear to return results, so
# that none are excluded when we try to trim away
# the terms that don't return results.
# We test this functionality separately in
# search.tests.DontRecommendFutileSearchTerms.
mocked_hit_counter.return_value = 1
client = self.login_with_client()
response = client.get('/search/')
source2terms = {
'languages in citations': ['Automake', 'C#', 'C++', 'Make',
'Python', 'shell script', 'XUL'],
'projects in citations': ['Mozilla Firefox'],
'tags': ['algol', 'symbolist poetry', 'rails', 'chinese chess']
}
tags_in_template = [tup[1] for tup in response.context[0]['suggestions']]
for source, terms in source2terms.items():
for term in terms:
self.assert_(term in tags_in_template,
"Expected %s in template"
"inspired by %s." % (term, source))
expected_tags = sum(source2terms.values(), [])
self.compare_lists(expected_tags, tags_in_template)
# We're not doing this one because at the moment suggestions only work in JS.
# def test_recommendations_with_twill(self):
# self.login_with_twill()
# tc.go(make_twill_url('http://openhatch.org/search/'))
# tc.fv('suggested_searches', 'use_0', '0') # Automake
# tc.fv('suggested_searches', 'use_1', '0') # C
# tc.fv('suggested_searches', 'use_2', '0') # C++
# tc.fv('suggested_searches', 'use_3', '0') # Firefox
# tc.fv('suggested_searches', 'use_4', '0') # Python
# tc.fv('suggested_searches', 'use_5', '1') # XUL
# tc.fv('suggested_searches', 'start', '0')
# tc.fv('suggested_searches', 'end', '100')
# tc.submit()
#
# # Check that if you click checkboxes,
# # you get the right list of bugs.
# # Test for bugs that ought to be there
# # and bugs that ought not to be.
# tc.find("Yo! This is a bug in XUL but not Firefox")
# tc.find("Oy! This is a bug in XUL and Firefox")
#
# tc.fv('suggested_searches', 'use_0', '0') # Automake
# tc.fv('suggested_searches', 'use_1', '0') # C
# tc.fv('suggested_searches', 'use_2', '0') # C++
# tc.fv('suggested_searches', 'use_3', '1') # Firefox
# tc.fv('suggested_searches', 'use_4', '0') # Python
# tc.fv('suggested_searches', 'use_5', '1') # XUL
# tc.fv('suggested_searches', 'start', '0')
# tc.fv('suggested_searches', 'end', '100')
# tc.submit()
#
# tc.notfind("Yo! This is a bug in XUL but not Firefox")
# tc.find("Oy! This is a bug in XUL and Firefox")
class SplitIntoTerms(TestCase):
def test_split_into_terms(self):
easy = '1 2 3'
self.assertEqual(
mysite.search.controllers.Query.split_into_terms(easy),
['1', '2', '3'])
easy = '"1"'
self.assertEqual(
mysite.search.controllers.Query.split_into_terms(easy),
['1'])
easy = 'c#'
self.assertEqual(
mysite.search.controllers.Query.split_into_terms(easy),
['c#'])
class IconGetsScaled(SearchTest):
def test_project_scales_its_icon_down_for_use_in_badge(self):
'''This test shows that the Project class successfully stores
a scaled-down version of its icon in the icon_smaller_for_badge
field.'''
# Step 1: Create a project with an icon
p = mysite.search.models.Project.create_dummy()
image_data = open(mysite.account.tests.photo('static/sample-photo.png')).read()
p.icon_raw.save('', ContentFile(image_data))
p.save()
# Assertion 1: p.icon_smaller_for_badge is false (since not scaled yet)
self.assertFalse(p.icon_smaller_for_badge)
# Step 2: Call the scaling method
p.update_scaled_icons_from_self_icon()
p.save()
# Assertion 2: Verify that it is now a true value
self.assert_(p.icon_smaller_for_badge,
"Expected p.icon_smaller_for_badge to be a true value.")
# Assertion 3: Verify that it has the right width
self.assertEqual(p.icon_smaller_for_badge.width, 40,
"Expected p.icon_smaller_for_badge to be 40 pixels wide.")
def test_short_icon_is_scaled_correctly(self):
'''Sometimes icons are rectangular, wider than they are tall. These icons shouldn't be squashed into a square, but scaled with their original aspect ratio respected.'''
# Step 1: Create a project with an icon
p = mysite.search.models.Project.create_dummy()
# account.tests.photo finds the right path.
image_data = open(mysite.account.tests.photo(
'static/images/icons/test-project-icon-64px-by-18px.png')).read()
p.icon_raw.save('', ContentFile(image_data))
p.save()
# Assertion 1: p.icon_smaller_for_badge is false (since not scaled yet)
self.assertFalse(p.icon_smaller_for_badge)
# Step 2: Call the scaling method
p.update_scaled_icons_from_self_icon()
p.save()
# Assertion 2: Verify that it is now a true value
self.assert_(p.icon_smaller_for_badge,
"Expected p.icon_smaller_for_badge to be a true value.")
# Assertion 3: Verify that it has the right width
self.assertEqual(p.icon_smaller_for_badge.width, 40,
"Expected p.icon_smaller_for_badge to be 40 pixels wide.")
# Assertion 3: Verify that it has the right height
# If we want to scale exactly we'll get 11.25 pixels, which rounds to 11.
self.assertEqual(p.icon_smaller_for_badge.height, 11)
class SearchOnFullWords(SearchTest):
def test_find_perl_not_properly(self):
Project.create_dummy()
Bug.create_dummy(description='properly')
perl_bug = Bug.create_dummy(description='perl')
self.assertEqual(Bug.all_bugs.all().count(), 2)
results = mysite.search.controllers.Query(
terms=['perl']).get_bugs_unordered()
self.assertEqual(list(results), [perl_bug])
class SearchTemplateDecodesQueryString(SearchTest):
def test_facets_appear_in_search_template_context(self):
response = self.client.get('/search/', {'language': 'Python'})
expected_facets = { 'language': 'Python' }
self.assertEqual(response.context['query'].active_facet_options,
expected_facets)
class FacetsFilterResults(SearchTest):
def test_facets_filter_results(self):
facets = {u'language': u'Python'}
# Those facets should pick up this bug:
python_project = Project.create_dummy(language='Python')
python_bug = Bug.create_dummy(project=python_project)
# But not this bug
not_python_project = Project.create_dummy(language='Nohtyp')
Bug.create_dummy(project=not_python_project)
results = mysite.search.controllers.Query(
terms=[], active_facet_options=facets).get_bugs_unordered()
self.assertEqual(list(results), [python_bug])
class QueryGetPossibleFacets(SearchTest):
"""Ask a query, what facets are you going to show on the left?
E.g., search for gtk, it says C (541)."""
def test_get_possible_facets(self):
# Create three projects
project1 = Project.create_dummy(language=u'c')
project2 = Project.create_dummy(language=u'd')
project3 = Project.create_dummy(language=u'e')
# Give each project a bug
Bug.create_dummy(project=project1, description=u'bug', good_for_newcomers=True)
Bug.create_dummy(project=project2, description=u'bug')
Bug.create_dummy(project=project3, description=u'bAg')
# Search for bugs matching "bug", while constraining to the language C
query = mysite.search.controllers.Query(
terms=[u'bug'],
terms_string=u'bug',
active_facet_options={u'language': u'c'})
possible_facets = dict(query.get_possible_facets())
self.assertEqual(query.get_bugs_unordered().count(), 1)
# We expect that, language-wise, you should be able to select any of
# the other languages, or 'deselect' your language constraint.
self.compare_lists_of_dicts(
possible_facets[u'language'][u'options'],
[
{ u'name': u'c', u'query_string': u'q=bug&language=c',
u'is_active': True, u'count': 1 },
{ u'name': u'd', u'query_string': u'q=bug&language=d',
u'is_active': False, u'count': 1 },
# e is excluded because its bug (u'bAg') doesn't match the term 'bug'
],
sort_key=u'name'
)
self.compare_lists_of_dicts(
possible_facets[u'toughness'][u'options'],
[
# There's no 'any' option for toughness unless you've
# selected a specific toughness value
{ u'name': u'bitesize', u'is_active': False,
u'query_string': u'q=bug&toughness=bitesize&language=c', u'count': 1 },
],
sort_key=u'name'
)
self.assertEqual(
possible_facets['language']['the_any_option'],
{ u'name': u'any', u'query_string': u'q=bug&language=',
u'is_active': False, u'count': 2 },
)
def test_possible_facets_always_includes_active_facet(self):
# even when active facet has no results.
c = Project.create_dummy(language=u'c')
Project.create_dummy(language=u'd')
Project.create_dummy(language=u'e')
Bug.create_dummy(project=c, description=u'bug')
query = mysite.search.controllers.Query.create_from_GET_data(
{u'q': u'nothing matches this', u'language': u'c'})
language_options = dict(query.get_possible_facets())['language']['options']
language_options_named_c = [opt for opt in language_options if opt['name'] == 'c']
self.assertEqual(len(language_options_named_c), 1)
class SingleTerm(SearchTest):
"""Search for just a single term."""
def setUp(self):
SearchTest.setUp(self)
python_project = Project.create_dummy(language='Python')
perl_project = Project.create_dummy(language='Perl')
c_project = Project.create_dummy(language='C')
# bitesize, matching bug in Python
Bug.create_dummy(project=python_project, good_for_newcomers=True,
description='screensaver')
# nonbitesize, matching bug in Python
Bug.create_dummy(project=python_project, good_for_newcomers=False,
description='screensaver')
# nonbitesize, matching bug in Perl
Bug.create_dummy(project=perl_project, good_for_newcomers=False,
description='screensaver')
# nonbitesize, nonmatching bug in C
Bug.create_dummy(project=c_project, good_for_newcomers=False,
description='toast')
GET_data = { 'q': 'screensaver' }
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
self.assertEqual(query.terms, ['screensaver'])
self.assertFalse(query.active_facet_options) # No facets
self.output_possible_facets = dict(query.get_possible_facets())
def test_toughness_facet(self):
# What options do we expect?
toughness_option_bitesize = {'name': 'bitesize', 'count': 1,
'is_active': False,
'query_string': 'q=screensaver&toughness=bitesize'}
toughness_option_any = {'name': 'any', 'count': 3,
'is_active': True,
'query_string': 'q=screensaver&toughness='}
expected_toughness_facet_options = [toughness_option_bitesize]
self.assertEqual(
self.output_possible_facets['toughness']['options'],
expected_toughness_facet_options
)
self.assertEqual(
self.output_possible_facets['toughness']['the_any_option'],
toughness_option_any
)
def test_languages_facet(self):
# What options do we expect?
languages_option_python = {'name': 'Python', 'count': 2,
'is_active': False,
'query_string': 'q=screensaver&language=Python'}
languages_option_perl = {'name': 'Perl', 'count': 1,
'is_active': False,
'query_string': 'q=screensaver&language=Perl'}
languages_option_any = {'name': 'any', 'count': 3,
'is_active': True,
'query_string': 'q=screensaver&language='}
expected_languages_facet_options = [
languages_option_python,
languages_option_perl,
]
self.compare_lists_of_dicts(
self.output_possible_facets['language']['options'],
expected_languages_facet_options
)
self.assertEqual(
self.output_possible_facets['language']['the_any_option'],
languages_option_any)
class SingleFacetOption(SearchTest):
"""Browse bugs matching a single facet option."""
def setUp(self):
SearchTest.setUp(self)
python_project = Project.create_dummy(language='Python')
perl_project = Project.create_dummy(language='Perl')
c_project = Project.create_dummy(language='C')
# bitesize, matching bug in Python
Bug.create_dummy(project=python_project, good_for_newcomers=True,
description='screensaver')
# nonbitesize, matching bug in Python
Bug.create_dummy(project=python_project, good_for_newcomers=False,
description='screensaver')
# nonbitesize, matching bug in Perl
Bug.create_dummy(project=perl_project, good_for_newcomers=False,
description='screensaver')
# nonbitesize, nonmatching bug in C
Bug.create_dummy(project=c_project, good_for_newcomers=False,
description='toast')
GET_data = { u'language': u'Python' }
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
self.assertFalse(query.terms) # No terms
self.assertEqual(query.active_facet_options, {u'language': u'Python'})
self.output_possible_facets = dict(query.get_possible_facets())
def test_toughness_facet(self):
# What options do we expect?
toughness_option_bitesize = {u'name': u'bitesize', u'count': 1,
u'is_active': False,
u'query_string': u'q=&toughness=bitesize&language=Python'}
toughness_option_any = {u'name': u'any', u'count': 2,
u'is_active': True,
u'query_string': u'q=&toughness=&language=Python'}
expected_toughness_facet_options = [toughness_option_bitesize]
self.compare_lists_of_dicts(
self.output_possible_facets[u'toughness'][u'options'],
expected_toughness_facet_options
)
self.assertEqual(
self.output_possible_facets[u'toughness'][u'the_any_option'],
toughness_option_any
)
def test_languages_facet(self):
# What options do we expect?
languages_option_python = {u'name': u'Python', u'count': 2,
u'is_active': True,
u'query_string': u'q=&language=Python'}
languages_option_perl = {u'name': u'Perl', u'count': 1,
u'is_active': False,
u'query_string': u'q=&language=Perl'}
languages_option_c = {u'name': u'C', u'count': 1,
u'is_active': False,
u'query_string': u'q=&language=C'}
languages_option_any = {u'name': u'any', u'count': 4,
u'is_active': False,
u'query_string': u'q=&language='}
expected_languages_facet_options = [
languages_option_python,
languages_option_perl,
languages_option_c,
]
self.compare_lists_of_dicts(
self.output_possible_facets[u'language'][u'options'],
expected_languages_facet_options
)
self.assertEqual(
self.output_possible_facets[u'language'][u'the_any_option'],
languages_option_any,
)
class QueryGetToughnessFacetOptions(SearchTest):
def test_get_toughness_facet_options(self):
# We create three "bitesize" bugs, but constrain the Query so
# that we're only looking at bugs in Python.
# Since only two of the bitesize bugs are in Python (one is
# in a project whose language is Perl), we expect only 1 bitesize
# bug to show up, and 2 total bugs.
python_project = Project.create_dummy(language=u'Python')
perl_project = Project.create_dummy(language=u'Perl')
Bug.create_dummy(project=python_project, good_for_newcomers=True)
Bug.create_dummy(project=python_project, good_for_newcomers=False)
Bug.create_dummy(project=perl_project, good_for_newcomers=True)
query = mysite.search.controllers.Query(
active_facet_options={u'language': u'Python'},
terms_string=u'')
output = query.get_facet_options(u'toughness', [u'bitesize', u''])
bitesize_dict = [d for d in output if d[u'name'] == u'bitesize'][0]
all_dict = [d for d in output if d[u'name'] == u'any'][0]
self.assertEqual(bitesize_dict[u'count'], 1)
self.assertEqual(all_dict[u'count'], 2)
def test_get_toughness_facet_options_with_terms(self):
python_project = Project.create_dummy(language=u'Python')
perl_project = Project.create_dummy(language=u'Perl')
Bug.create_dummy(project=python_project, good_for_newcomers=True,
description=u'a')
Bug.create_dummy(project=python_project, good_for_newcomers=False,
description=u'a')
Bug.create_dummy(project=perl_project, good_for_newcomers=True,
description=u'b')
GET_data = {u'q': u'a'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
output = query.get_facet_options(u'toughness', [u'bitesize', u''])
bitesize_dict = [d for d in output if d[u'name'] == u'bitesize'][0]
all_dict = [d for d in output if d[u'name'] == u'any'][0]
self.assertEqual(bitesize_dict[u'count'], 1)
self.assertEqual(all_dict[u'count'], 2)
class QueryGetPossibleLanguageFacetOptionNames(SearchTest):
@mock.patch('mysite.search.tasks.PopulateProjectLanguageFromOhloh')
def setUp(self, do_nothing):
SearchTest.setUp(self)
python_project = Project.create_dummy(language=u'Python')
perl_project = Project.create_dummy(language=u'Perl')
c_project = Project.create_dummy(language=u'C')
unknown_project = Project.create_dummy(language=u'')
Bug.create_dummy(project=python_project, title=u'a')
Bug.create_dummy(project=perl_project, title=u'a')
Bug.create_dummy(project=c_project, title=u'b')
Bug.create_dummy(project=unknown_project, title=u'unknowable')
def test_with_term(self):
# In the setUp we create four bugs, but only two of them would match
# a search for 'a'. They are in two different languages, so let's make
# sure that we show only those two languages.
GET_data = {u'q': u'a'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
language_names = query.get_language_names()
self.assertEqual(
sorted(language_names),
sorted([u'Python', u'Perl']))
def test_with_active_language_facet(self):
# In the setUp we create bugs in three named languages plus one project
# with no language set. Here, we verify that get_language_names() returns
# all four names (including Unknown), even though the GET data shows that we are
# browsing by language.
GET_data = {u'language': u'Python'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
language_names = query.get_language_names()
self.assertEqual(
sorted(language_names),
sorted([u'Python', u'Perl', u'C', u'Unknown']))
def test_with_language_as_unknown(self):
# In the setUp we create bugs in three named languages plus one project
# with no language set. Here, we verify that get_language_names() returns
# all four names (including Unknown), even though the GET data shows that we are
# browsing by language.
GET_data = {u'language': u'Unknown'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
language_names = query.get_language_names()
self.assertEqual(
sorted(language_names),
sorted([u'Python', u'Perl', u'C', u'Unknown']))
def test_with_language_as_unknown_and_query(self):
# In the setUp we create one bug in a project with no language set.
# Here, we verify that combining a search term with language=Unknown
# still matches that bug.
GET_data = {u'language': u'Unknown', u'q': u'unknowable'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
match_count = query.get_bugs_unordered().count()
self.assertEqual(match_count, 1)
class QueryGetPossibleProjectFacetOptions(SearchTest):
@mock.patch('mysite.search.tasks.PopulateProjectLanguageFromOhloh')
def setUp(self, do_nothing):
SearchTest.setUp(self)
projects = [
Project.create_dummy(name=u'Miro'),
Project.create_dummy(name=u'Dali'),
Project.create_dummy(name=u'Magritte')
]
for p in projects:
Bug.create_dummy(project=p)
def test_select_a_project_and_see_other_project_options(self):
GET_data = {u'project': u'Miro'}
query = mysite.search.controllers.Query.create_from_GET_data(GET_data)
possible_project_names = [x['name'] for x in dict(query.get_possible_facets())['project']['options']]
self.assertEqual(
sorted(possible_project_names),
sorted(list(Project.objects.values_list('name', flat=True))))
class QueryContributionType(SearchTest):
def setUp(self):
SearchTest.setUp(self)
python_project = Project.create_dummy(language=u'Python')
perl_project = Project.create_dummy(language=u'Perl')
c_project = Project.create_dummy(language=u'C')
Bug.create_dummy(project=python_project, title=u'a')
Bug.create_dummy(project=perl_project, title=u'a',
concerns_just_documentation=True)
Bug.create_dummy(project=c_project, title=u'b')
def test_contribution_type_is_an_available_facet(self):
GET_data = {}
starting_query = mysite.search.controllers.Query.create_from_GET_data(
GET_data)
self.assert_(u'contribution type' in dict(starting_query.get_possible_facets()))
def test_contribution_type_options_are_reasonable(self):
GET_data = {}
starting_query = mysite.search.controllers.Query.create_from_GET_data(
GET_data)
cto = starting_query.get_facet_options(u'contribution_type',
[u'documentation'])
documentation_one, = [k for k in cto if k[u'name'] == u'documentation']
any_one = starting_query.get_facet_options(u'contribution_type', [u''])[0]
self.assertEqual(documentation_one[u'count'], 1)
self.assertEqual(any_one[u'count'], 3)
class QueryProject(SearchTest):
def setUp(self):
SearchTest.setUp(self)
python_project = Project.create_dummy(language=u'Python',
name='thingamajig')
c_project = Project.create_dummy(language=u'C',
name='thingamabob')
Bug.create_dummy(project=python_project, title=u'a')
Bug.create_dummy(project=python_project, title=u'a',
concerns_just_documentation=True)
Bug.create_dummy(project=c_project, title=u'b')
def test_project_is_an_available_facet(self):
GET_data = {}
starting_query = mysite.search.controllers.Query.create_from_GET_data(
GET_data)
self.assert_(u'project' in dict(starting_query.get_possible_facets()))
def test_contribution_type_options_are_reasonable(self):
GET_data = {}
starting_query = mysite.search.controllers.Query.create_from_GET_data(
GET_data)
cto = starting_query.get_facet_options(u'project',
[u'thingamajig',
u'thingamabob' ])
jig_ones, = [k for k in cto if k[u'name'] == u'thingamajig']
any_one = starting_query.get_facet_options(u'project', [u''])[0]
self.assertEqual(jig_ones[u'count'], 2)
self.assertEqual(any_one[u'count'], 3)
class QueryStringCaseInsensitive(SearchTest):
def test_Language(self):
"""Do we redirect queries that use non-lowercase facet keys to pages
that use lowercase facet keys?"""
redirects = self.client.get(u'/search/',
{u'LANguaGE': u'pytHon'}, follow=True).redirect_chain
self.assertEqual(redirects, [(u'http://testserver/search/?language=pytHon', 302)])
class HashQueryData(SearchTest):
def test_queries_with_identical_data_hash_alike(self):
GET_data = {u'q': u'socialguides', u'language': u'looxii'}
one = mysite.search.controllers.Query.create_from_GET_data(GET_data)
two = mysite.search.controllers.Query.create_from_GET_data(GET_data)
self.assertEqual(one.get_sha1(), two.get_sha1())
def test_queries_with_equiv_data_expressed_differently_hash_alike(self):
GET_data_1 = {u'q': u'socialguides zetapage', u'language': u'looxii'}
GET_data_2 = {u'q': u'zetapage socialguides', u'language': u'looxii'}
one = mysite.search.controllers.Query.create_from_GET_data(GET_data_1)
two = mysite.search.controllers.Query.create_from_GET_data(GET_data_2)
self.assertEqual(one.get_sha1(), two.get_sha1())
def test_queries_with_different_data_hash_differently(self):
GET_data_1 = {u'q': u'socialguides zetapage', u'language': u'looxii'}
GET_data_2 = {u'q': u'socialguides ninjapost', u'language': u'looxii'}
one = mysite.search.controllers.Query.create_from_GET_data(GET_data_1)
two = mysite.search.controllers.Query.create_from_GET_data(GET_data_2)
self.assertNotEqual(one.get_sha1(), two.get_sha1())
# How on earth do we test for collisions?
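def sha1_collision_probability_sketch(n):
    # (Added aside; hedged, not part of the original test suite.) A
    # direct collision test is impractical: by the birthday bound, the
    # chance of any collision among n distinct 160-bit SHA-1 values is
    # roughly n**2 / 2**161 -- about 3.4e-31 for a billion queries.
    return float(n) ** 2 / 2 ** 161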
class QueryGrabHitCount(SearchTest):
def test_eventhive_grab_hitcount_once_stored(self):
data = {u'q': u'eventhive', u'language': u'shoutNOW'}
query = mysite.search.controllers.Query.create_from_GET_data(data)
stored_hit_count = 10
# Get the cache key used to store the hit count.
hit_count_cache_key = query.get_hit_count_cache_key()
# Set the cache value.
cache.set(hit_count_cache_key, stored_hit_count)
# Test that it is fetched correctly.
self.assertEqual(query.get_or_create_cached_hit_count(), stored_hit_count)
def test_shoutnow_cache_hitcount_on_grab(self):
project = Project.create_dummy(language=u'shoutNOW')
Bug.create_dummy(project=project)
data = {u'language': u'shoutNOW'}
query = mysite.search.controllers.Query.create_from_GET_data(data)
expected_hit_count = 1
self.assertEqual(query.get_or_create_cached_hit_count(), expected_hit_count)
# Get the cache key used to store the hit count.
hit_count_cache_key = query.get_hit_count_cache_key()
# Get the cache value.
stored_hit_count = cache.get(hit_count_cache_key)
print "Stored: %s" % stored_hit_count
# Test that it was stored correctly.
self.assertEqual(stored_hit_count, expected_hit_count)
class ClearCacheWhenBugsChange(SearchTest):
def test_cached_cleared_after_bug_save_or_delete(self):
data = {u'language': u'shoutNOW'}
query = mysite.search.controllers.Query.create_from_GET_data(data)
old_hcc_timestamp = mysite.base.models.Timestamp.get_timestamp_for_string(
'hit_count_cache_timestamp')
# Cache entry created after hit count retrieval
query.get_or_create_cached_hit_count()
new_hcc_timestamp = mysite.base.models.Timestamp.get_timestamp_for_string(
'hit_count_cache_timestamp')
self.assertEqual(old_hcc_timestamp, new_hcc_timestamp)
# Cache cleared after bug save
project = Project.create_dummy(language=u'shoutNOW')
bug = Bug.create_dummy(project=project)
newer_hcc_timestamp = mysite.base.models.Timestamp.get_timestamp_for_string(
'hit_count_cache_timestamp')
self.assertNotEqual(new_hcc_timestamp, newer_hcc_timestamp)
# Cache entry created after hit count retrieval
query.get_or_create_cached_hit_count()
newest_hcc_timestamp = mysite.base.models.Timestamp.get_timestamp_for_string(
'hit_count_cache_timestamp')
self.assertEqual(newer_hcc_timestamp, newest_hcc_timestamp)
# Cache cleared after bug deletion
bug.delete()
        newester_hcc_timestamp = mysite.base.models.Timestamp.get_timestamp_for_string(
            'hit_count_cache_timestamp')
self.assertNotEqual(newest_hcc_timestamp, newester_hcc_timestamp)
class DontRecommendFutileSearchTerms(TwillTests):
def test_removal_of_futile_terms(self):
mysite.search.models.Bug.create_dummy_with_project(description=u'useful')
self.assertEqual(
Person.only_terms_with_results([u'useful', u'futile']),
[u'useful'])
class PublicizeBugTrackerIndex(SearchTest):
def setUp(self):
SearchTest.setUp(self)
self.search_page_response = self.client.get(reverse(mysite.search.views.fetch_bugs))
self.bug_tracker_count = mysite.search.controllers.get_project_count()
def test_search_template_contains_bug_tracker_count(self):
self.assertEqual(
self.search_page_response.context[0][u'project_count'],
self.bug_tracker_count)
class TestPotentialMentors(TwillTests):
fixtures = ['user-paulproteus', 'user-barry', 'person-barry', 'person-paulproteus']
def test(self):
'''Create a Banshee mentor who can do C#
and a separate C# mentor, and verify that Banshee thinks it has
two potential mentors.'''
banshee = Project.create_dummy(name='Banshee', language='C#')
can_mentor, _ = mysite.profile.models.TagType.objects.get_or_create(name=u'can_mentor')
willing_to_mentor_banshee, _ = mysite.profile.models.Tag.objects.get_or_create(
tag_type=can_mentor,
text=u'Banshee')
willing_to_mentor_c_sharp, _ = mysite.profile.models.Tag.objects.get_or_create(
tag_type=can_mentor,
text=u'C#')
link = mysite.profile.models.Link_Person_Tag(
person=Person.objects.get(user__username=u'paulproteus'),
tag=willing_to_mentor_banshee)
link.save()
link = mysite.profile.models.Link_Person_Tag(
person=Person.objects.get(user__username=u'paulproteus'),
tag=willing_to_mentor_c_sharp)
link.save()
link = mysite.profile.models.Link_Person_Tag(
person=Person.objects.get(user__username=u'barry'),
tag=willing_to_mentor_c_sharp)
link.save()
banshee_mentors = banshee.potential_mentors
self.assertEqual(len(banshee_mentors), 2)
class SuggestAlertOnLastResultsPage(TwillTests):
fixtures = ['user-paulproteus']
def exercise_alert(self, anonymous=True):
"""The 'anonymous' parameter allows the alert functionality to be
tested for anonymous and logged-in users."""
if not anonymous:
self.login_with_twill()
# Create some dummy data
p = Project.create_dummy(language='ruby')
# 15 bugs matching 'ruby'
for i in range(15):
b = Bug.create_dummy(description='ruby')
b.project = p
b.save()
# Visit the first page of a vol. opp. search results page.
opps_view = mysite.search.views.fetch_bugs
query = u'ruby'
opps_query_string = { u'q': query, u'start': 1, u'end': 10}
opps_url = make_twill_url('http://openhatch.org'+reverse(opps_view) + '?' + mysite.base.unicode_sanity.urlencode(opps_query_string))
tc.go(opps_url)
# Make sure we *don't* have the comment that flags this as a page that offers an email alert subscription button
tc.notfind("this page should offer a link to sign up for an email alert")
# Visit the last page of results
GET = { u'q': query, u'start': 11, u'end': 20}
query_string = mysite.base.unicode_sanity.urlencode(GET)
opps_url = make_twill_url('http://openhatch.org'+reverse(opps_view) + '?' + query_string)
tc.go(opps_url)
# make sure we /do/ have the comment that flags this as a page that
# offers an email alert subscription button
tc.find("this page should offer a link to sign up for an email alert")
if not anonymous:
# if the user is logged in, make sure that we have autopopulated
# the form with her email address
tc.find(User.objects.get(username='paulproteus').email)
# Submit the 'alert' form.
email_address = 'yetanother@ema.il'
tc.fv('alert', 'email', email_address)
tc.submit()
if anonymous:
client = self.client
else:
client = self.login_with_client()
alert_data_in_form = {
'query_string': query_string,
'how_many_bugs_at_time_of_request': Bug.open_ones.filter(project=p).count(),
'email': email_address,
}
        # Twill fails here for some reason, so let's continue the journey
        # with Django's built-in test client.
response = client.post(reverse(mysite.search.views.subscribe_to_bug_alert_do), alert_data_in_form)
# This response should be a HTTP redirect instruction
self.assertEqual(response.status_code, 302)
redirect_target_url = response._headers['location'][1]
self.assert_(query_string in redirect_target_url)
        # The response redirects back to the original search results page
response = client.get(redirect_target_url)
self.assertContains(response, "this page should confirm that an email alert has been registered")
# At this point, make sure that the DB contains a record of
# * What the query was.
# * When the request was made.
# * How many bugs were returned by the query at the time of request.
# There should be only one alert
all_alerts = BugAlert.objects.all()
self.assertEqual(all_alerts.count(), 1)
alert_record = all_alerts[0]
self.assert_(alert_record)
assert_that_record_has_this_data = alert_data_in_form
# For the logged-in user, also check that the record contains the
# identity of the user who made the alert request.
if not anonymous:
assert_that_record_has_this_data['user'] = User.objects.get(username='paulproteus')
for key, expected_value in assert_that_record_has_this_data.items():
            self.assertEqual(getattr(alert_record, key), expected_value,
                             'alert.%s = %s not (expected) %s' % (
                                 key, getattr(alert_record, key), expected_value))
# run the above test for our two use cases: logged in and not
def test_alert_anon(self):
self.exercise_alert(anonymous=True)
def test_alert_logged_in(self):
self.exercise_alert(anonymous=False)
class DeleteAnswer(TwillTests):
fixtures = ['user-paulproteus']
def test_delete_paragraph_answer(self):
# create dummy question
p = Project.create_dummy(name='Ubuntu')
question__pk = 0
q = ProjectInvolvementQuestion.create_dummy(pk=question__pk, is_bug_style=False)
# create our dummy answer
        a = Answer.create_dummy(text='i am saying things', question=q, project=p, author=User.objects.get(username='paulproteus'))
# delete our answer
POST_data = {
'answer__pk': a.pk,
}
POST_handler = reverse(mysite.project.views.delete_paragraph_answer_do)
response = self.login_with_client().post(POST_handler, POST_data)
# go back to the project page and make sure that our answer isn't there anymore
project_url = p.get_url()
self.assertRedirects(response, project_url)
project_page = self.login_with_client().get(project_url)
self.assertNotContains(project_page, a.text)
# and make sure our answer isn't in the db anymore
self.assertEqual(Answer.objects.filter(pk=a.pk).count(), 0)
def test_delete_bug_answer(self):
# create dummy question
p = Project.create_dummy(name='Ubuntu')
        # It's important that this pk corresponds to the pk of an actual
        # bug-style question, as specified in our view; otherwise
        # get_or_create will try to create one, and it won't be able to
        # because of a unique key error.
question__pk = 2
q = ProjectInvolvementQuestion.create_dummy(pk=question__pk, is_bug_style=True)
# create our dummy answer
        a = Answer.create_dummy(title='i want this bug fixed', text='for these reasons', question=q, project=p, author=User.objects.get(username='paulproteus'))
# delete our answer
POST_data = {
'answer__pk': a.pk,
}
POST_handler = reverse(mysite.project.views.delete_paragraph_answer_do)
response = self.login_with_client().post(POST_handler, POST_data)
# go back to the project page and make sure that our answer isn't there anymore
project_url = p.get_url()
self.assertRedirects(response, project_url)
project_page = self.login_with_client().get(project_url)
self.assertNotContains(project_page, a.title)
# and make sure our answer isn't in the db anymore
self.assertEqual(Answer.objects.filter(pk=a.pk).count(), 0)
class CreateBugAnswer(TwillTests):
fixtures = ['user-paulproteus']
def test_create_bug_answer(self):
# go to the project page
p = Project.create_dummy(name='Ubuntu')
question__pk = 1
question = ProjectInvolvementQuestion.create_dummy(
key_string='non_code_participation', is_bug_style=True)
question.save()
title = 'omfg i wish this bug would go away'
text = 'kthxbai'
POST_data = {
'project__pk': p.pk,
'question__pk': str(question__pk),
'answer__title': title,
'answer__text': text
}
POST_handler = reverse(mysite.project.views.create_answer_do)
response = self.login_with_client().post(POST_handler, POST_data)
# try to get the BugAnswer which we just submitted from the database
our_bug_answer = Answer.objects.get(title=title)
# make sure it has the right attributes
self.assertEqual(our_bug_answer.text, text)
self.assertEqual(our_bug_answer.question.pk, question__pk)
self.assertEqual(our_bug_answer.project.pk, p.pk)
project_url = p.get_url()
self.assertRedirects(response, project_url)
project_page = self.login_with_client().get(project_url)
# make sure that our data shows up on the page
self.assertContains(project_page, title)
self.assertContains(project_page, text)
class WeTakeOwnershipOfAnswersAtLogin(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_create_answer_but_take_ownership_at_login_time(self):
session = {}
# Create the Answer object, but set its User to None
answer = Answer.create_dummy()
answer.author = None
answer.is_published = False
answer.save()
# Verify that the Answer object is not available by .objects()
self.assertFalse(Answer.objects.all())
# Store the Answer IDs in the session
mysite.project.controllers.note_in_session_we_control_answer_id(session, answer.id)
self.assertEqual(session['answer_ids_that_are_ours'], [answer.id])
        # If you want to look at those answers, you can do so this way:
stored_answers = mysite.project.controllers.get_unsaved_answers_from_session(session)
self.assertEqual([answer.id for answer in stored_answers], [answer.id])
# Verify that the Answer object is still not available by .objects()
self.assertFalse(Answer.objects.all())
# At login time, take ownership of those Answer IDs
mysite.project.controllers.take_control_of_our_answers(
User.objects.get(username='paulproteus'), session)
# And now we own it!
self.assertEqual(Answer.objects.all().count(), 1)
class CreateAnonymousAnswer(TwillTests):
fixtures = ['user-paulproteus']
def test_create_answer_anonymously(self):
# Steps for this test
# 1. User fills in the form anonymously
# 2. We test that the Answer is not yet saved
# 3. User logs in
# 4. We test that the Answer is saved
p = Project.create_dummy(name='Myproject')
q = ProjectInvolvementQuestion.create_dummy(
key_string='where_to_start', is_bug_style=False)
# Do a GET on the project page to prove cookies work.
self.client.get(p.get_url())
# POST some text to the answer creation post handler
answer_text = """Help produce official documentation, share the solution to a problem, or check, proof and test other documents for accuracy."""
POST_data = {
'project__pk': p.pk,
'question__pk': q.pk,
'answer__text': answer_text,
}
response = self.client.post(reverse(mysite.project.views.create_answer_do), POST_data,
follow=True)
self.assertEqual(response.redirect_chain,
[('http://testserver/account/login/?next=%2F%2Bprojects%2FMyproject', 302)])
# If this were an Ajaxy post handler, we might assert something about
# the response, like
# self.assertEqual(response.content, '1')
# check that the db contains a record with this text
try:
record = Answer.all_even_unowned.get(text=POST_data['answer__text'])
except Answer.DoesNotExist:
print "All Answers:", Answer.all_even_unowned.all()
raise Answer.DoesNotExist
self.assertEqual(record.project, p)
self.assertEqual(record.question, q)
self.assertFalse(Answer.objects.all()) # it's unowned
# Now, the session will know about the answer, but the answer will not be published.
# Visit the login page, assert that the page contains the text of the answer.
response = self.client.get(reverse('oh_login'))
self.assertContains(response, POST_data['answer__text'])
# But when the user is logged in and *then* visits the project page
login_worked = self.client.login(username='paulproteus',
password="paulproteus's unbreakable password")
self.assert_(login_worked)
self.client.get(p.get_url())
# Now, the Answer should have an author whose username is paulproteus
answer = Answer.objects.get()
self.assertEqual(answer.text, POST_data['answer__text'])
self.assertEqual(answer.author.username, 'paulproteus')
# Finally, go to the project page and make sure that our Answer has appeared
response = self.client.get(p.get_url())
self.assertContains(response, answer_text)
class CreateAnswer(TwillTests):
fixtures = ['user-paulproteus']
def test_create_answer(self):
p = Project.create_dummy()
q = ProjectInvolvementQuestion.create_dummy(
key_string='where_to_start', is_bug_style=False)
# POST some text to the answer creation post handler
POST_data = {
'project__pk': p.pk,
'question__pk': q.pk,
'answer__text': """Help produce official documentation, share \
the solution to a problem, or check, proof and test other documents for \
accuracy.""",
}
self.login_with_client().post(reverse(mysite.project.views.create_answer_do), POST_data)
# If this were an Ajaxy post handler, we might assert something about
# the response, like
# self.assertEqual(response.content, '1')
# check that the db contains a record with this text
try:
record = Answer.objects.get(text=POST_data['answer__text'])
except Answer.DoesNotExist:
print "All Answers:", Answer.objects.all()
raise Answer.DoesNotExist
self.assertEqual(record.author, User.objects.get(username='paulproteus'))
self.assertEqual(record.project, p)
self.assertEqual(record.question, q)
# check that the project page now includes this text
project_page = self.client.get(p.get_url())
self.assertContains(project_page, POST_data['answer__text'])
self.assertContains(project_page, record.author.username)
def test_multiparagraph_answer(self):
"""
If a multi-paragraph answer is submitted, display it as a
multi-paragraph answer.
"""
# go to the project page
p = Project.create_dummy(name='Ubuntu')
q = ProjectInvolvementQuestion.create_dummy(
key_string='where_to_start', is_bug_style=False)
q.save()
text = ['This is a multiparagraph answer.',
'This is the second paragraph.',
'This is the third paragraph.']
POST_data = {
'project__pk': p.pk,
'question__pk': q.pk,
'answer__text': "\n".join(text)
}
POST_handler = reverse(mysite.project.views.create_answer_do)
self.login_with_client().post(POST_handler, POST_data)
project_page = self.login_with_client().get(p.get_url())
# Django documents publicly that linebreaks replaces one "\n" with "<br />".
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#linebreaks
self.assertContains(project_page, "<br />".join(text))
class TestBugClassTimestamp(TwillTests):
def test_on_mark_looks_closed(self):
# There's no Timestamp for Bug class yet, right?
now = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assertEqual(now, mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Making a Bug should not bump the Bug class Timestamp
p = mysite.search.models.Project.create_dummy()
b = mysite.search.models.Bug.create_dummy(project=p)
now = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assertEqual(now,
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Setting the bug to looks_closed should bump the Bug class Timestamp
b.looks_closed = True
b.save()
# Now it's higher, right?
now = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assert_(now > mysite.base.models.Timestamp.ZERO_O_CLOCK)
def test_on_delete(self):
# There's no Timestamp for Bug class yet, right?
now = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assertEqual(now, mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Making a Bug should not bump the Bug class Timestamp
p = mysite.search.models.Project.create_dummy()
b = mysite.search.models.Bug.create_dummy(project=p)
now = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assertEqual(now,
mysite.base.models.Timestamp.ZERO_O_CLOCK)
# Deleting that Bug should bump the Bug class Timestamp
b.delete()
later = mysite.base.models.Timestamp.get_timestamp_for_string(str(mysite.search.models.Bug))
self.assert_(later > now)
class BugKnowsItsFreshness(TestCase):
def test(self):
b = mysite.search.models.Bug.create_dummy_with_project()
b.last_polled = datetime.datetime.now()
self.assertTrue(b.data_is_more_fresh_than_one_day())
b.last_polled -= datetime.timedelta(
days=1, hours=1)
self.assertFalse(b.data_is_more_fresh_than_one_day())
class WeCanPollSomethingToCheckIfAProjectIconIsLoaded(TestCase):
def test(self):
# Create a dummy project
p = Project.create_dummy()
# Make sure its ohloh icon download time is null
self.assertEqual(p.date_icon_was_fetched_from_ohloh, None)
# get the thing we poll
response = self.client.get(reverse(
mysite.search.views.project_has_icon,
kwargs={'project_name': p.name}))
self.assertEqual(response.content, 'keep polling')
# okay, so now say we finished polling
p.date_icon_was_fetched_from_ohloh = datetime.datetime.utcnow()
p.save()
# so what now?
response = self.client.get(reverse(
mysite.search.views.project_has_icon,
kwargs={'project_name': p.name}))
self.assertEqual(response.content, p.get_url_of_icon_or_generic())
class BugCanRefreshItself(TestCase):
@mock.patch('mysite.customs.bugtrackers.BugTracker.refresh_one_bug')
def test_from_static_class(self, mock_refresh_one):
bt = mysite.customs.bugtrackers.BugTracker()
b = mysite.search.models.Bug.create_dummy_with_project()
b.set_bug_tracker_class_from_instance(bt)
b.bug_tracker.make_instance().refresh_one_bug(b)
self.assertTrue(mock_refresh_one.called)
# vim: set nu ai et ts=4 sw=4:
|
jledbetter/openhatch
|
mysite/search/tests.py
|
Python
|
agpl-3.0
| 66,967
|
[
"VisIt"
] |
dbbe2b3100b756ce4da1b3ed0549ff0dce94634819aacb717f1fb4c008eaf8b5
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Christopher M. Bruns
# Contributors: Robert McGibbon, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
element.py: Used for managing elements.
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Robert T. McGibbon
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division
import numpy as np
from mdtraj.utils.unit.quantity import is_quantity
from mdtraj.utils.unit.unit_definitions import daltons
class Element(tuple):
"""An Element represents a chemical element.
The mdtraj.pdb.element module contains objects for all the standard chemical elements,
such as element.hydrogen or element.carbon. You can also call the static method
Element.getBySymbol() to look up the Element with a particular chemical symbol."""
__slots__ = []
_elements_by_symbol = {}
_elements_by_atomic_number = {}
def __new__(cls, number, name, symbol, mass, radius):
"""Create a new element
Parameters
----------
number : int
The atomic number of the element
name : str
The name of the element
symbol : str
The chemical symbol of the element
mass : float
The atomic mass of the element
radius : float
The van der Waals radius of the element, in nm.
"""
newobj = tuple.__new__(cls, (number, name, symbol, mass, radius))
# Index this element in a global table
s = symbol.strip().upper()
assert s not in Element._elements_by_symbol
Element._elements_by_symbol[s] = newobj
if number in Element._elements_by_atomic_number:
other_element = Element._elements_by_atomic_number[number]
if mass < other_element.mass:
# If two "elements" share the same atomic number, they're
# probably hydrogen and deuterium, and we want to choose
# the lighter one to put in the table by atomic_number,
# since it's the "canonical" element.
Element._elements_by_atomic_number[number] = newobj
else:
Element._elements_by_atomic_number[number] = newobj
return newobj
def __reduce__(self):
# __reduce__ is part of the pickle protocol. we need to make sure that
# elements still act as singletons after a pickle load/save cycle --
# so load() has to *not create* a new object.
# see http://docs.python.org/3.3/library/pickle.html#object.__reduce__
# relevant test:
# >>> cPickle.loads(cPickle.dumps(md.load(get_fn('bpti.pdb')).topology))
return str(self.name)
@staticmethod
def getBySymbol(symbol):
"""Get the Element with a particular chemical symbol
Parameters
----------
symbol : str
Returns
-------
element : Element
"""
s = symbol.strip().upper()
return Element._elements_by_symbol[s]
@staticmethod
def getByAtomicNumber(number):
""" Get the element with a particular atomic number
Parameters
----------
number : int
Returns
-------
element : Element
"""
return Element._elements_by_atomic_number[number]
@staticmethod
def getByMass(mass):
"""Get the element whose mass is CLOSEST to the requested mass. This
method should not be used for repartitioned masses.
Parameters
----------
mass : float
Returns
-------
element : Element
"""
# Convert any masses to daltons
if is_quantity(mass):
mass = mass.value_in_unit(daltons)
diff = mass
best_guess = None
for key in Element._elements_by_atomic_number:
element = Element._elements_by_atomic_number[key]
massdiff = abs(element.mass - mass)
if massdiff < diff:
best_guess = element
diff = massdiff
return best_guess
@property
def number(self):
"""Atomic number."""
return tuple.__getitem__(self, 0)
@property
def name(self):
"""Element name"""
return tuple.__getitem__(self, 1)
@property
def symbol(self):
"""Element symbol"""
return tuple.__getitem__(self, 2)
@property
def mass(self):
"""Element mass"""
return tuple.__getitem__(self, 3)
@property
def radius(self):
"""Element atomic radius
van der Waals radii are taken from A. Bondi, J. Phys. Chem., 68, 441 -
452, 1964, except the value for H, which is taken from R.S.
Rowland & R. Taylor, J.Phys.Chem., 100, 7384 - 7391, 1996. Radii
that are not available in either of these publications have RvdW =
        2.00 A. The radii for ions (Na, K, Cl, Ca, Mg, and Cs) are based on
the CHARMM27 Rmin/2 parameters for (SOD, POT, CLA, CAL, MG, CES) by
default.
"""
return tuple.__getitem__(self, 4)
def __getitem__(self, item):
raise TypeError
def __str__(self):
return self.name
@property
def atomic_number(self):
"""Atomic number"""
return tuple.__getitem__(self, 0)
# Make it so only virtual sites evaluate to boolean False (since it's really
# *not* an element)
def __bool__(self):
return bool(self.mass)
def __nonzero__(self):
return bool(self.mass)
# This is for backward compatibility.
def get_by_symbol(symbol):
s = symbol.strip().upper()
return Element._elements_by_symbol[s]
# van der Waals radii are taken from A. Bondi,
# J. Phys. Chem., 68, 441 - 452, 1964,
# except the value for H, which is taken from R.S. Rowland & R. Taylor,
# J.Phys.Chem., 100, 7384 - 7391, 1996. Radii that are not available in
# either of these publications have RvdW = 2.00 A
# The radii for ions (Na, K, Cl, Ca, Mg, and Cs) are based on the CHARMM27
# Rmin/2 parameters for (SOD, POT, CLA, CAL, MG, CES) by default.
virtual = Element( 0,"virtual site","VS", 0.0, 0.0)
hydrogen = Element( 1,"hydrogen","H", 1.007947, 0.12)
deuterium = Element( 1,"deuterium","D", 2.0135532127, 0.12)
helium = Element( 2,"helium","He", 4.003, 0.14)
lithium = Element( 3,"lithium","Li", 6.9412, 0.182)
beryllium = Element( 4,"beryllium","Be", 9.0121823, 0.2)
boron = Element( 5,"boron","B", 10.8117, 0.2)
carbon = Element( 6,"carbon","C", 12.01078, 0.17)
nitrogen = Element( 7,"nitrogen","N", 14.00672, 0.155)
oxygen = Element( 8,"oxygen","O", 15.99943, 0.152)
fluorine = Element( 9,"fluorine","F", 18.99840325, 0.147)
neon = Element( 10,"neon","Ne", 20.17976, 0.154)
sodium = Element( 11,"sodium","Na", 22.989769282, 0.136)
magnesium = Element( 12,"magnesium","Mg", 24.30506, 0.118)
aluminum = Element( 13,"aluminum","Al", 26.98153868, 0.2)
silicon = Element( 14,"silicon","Si", 28.08553, 0.21)
phosphorus = Element( 15,"phosphorus","P", 30.9737622, 0.18)
sulfur = Element( 16,"sulfur","S", 32.0655, 0.18)
chlorine = Element( 17,"chlorine","Cl", 35.4532, 0.227)
argon = Element( 18,"argon","Ar", 39.9481, 0.188)
potassium = Element( 19,"potassium","K", 39.09831, 0.176)
calcium = Element( 20,"calcium","Ca", 40.0784, 0.137)
scandium = Element( 21,"scandium","Sc", 44.9559126, 0.2)
titanium = Element( 22,"titanium","Ti", 47.8671, 0.2)
vanadium = Element( 23,"vanadium","V", 50.94151, 0.2)
chromium = Element( 24,"chromium","Cr", 51.99616, 0.2)
manganese = Element( 25,"manganese","Mn", 54.9380455, 0.2)
iron = Element( 26,"iron","Fe", 55.8452, 0.2)
cobalt = Element( 27,"cobalt","Co", 58.9331955, 0.2)
nickel = Element( 28,"nickel","Ni", 58.69342, 0.163)
copper = Element( 29,"copper","Cu", 63.5463, 0.14)
zinc = Element( 30,"zinc","Zn", 65.4094, 0.139)
gallium = Element( 31,"gallium","Ga", 69.7231, 0.107)
germanium = Element( 32,"germanium","Ge", 72.641, 0.2)
arsenic = Element( 33,"arsenic","As", 74.921602, 0.185)
selenium = Element( 34,"selenium","Se", 78.963, 0.19)
bromine = Element( 35,"bromine","Br", 79.9041, 0.185)
krypton = Element( 36,"krypton","Kr", 83.7982, 0.202)
rubidium = Element( 37,"rubidium","Rb", 85.46783, 0.2)
strontium = Element( 38,"strontium","Sr", 87.621, 0.2)
yttrium = Element( 39,"yttrium","Y", 88.905852, 0.2)
zirconium = Element( 40,"zirconium","Zr", 91.2242, 0.2)
niobium = Element( 41,"niobium","Nb", 92.906382, 0.2)
molybdenum = Element( 42,"molybdenum","Mo", 95.942, 0.2)
technetium = Element( 43,"technetium","Tc", 98, 0.2)
ruthenium = Element( 44,"ruthenium","Ru", 101.072, 0.2)
rhodium = Element( 45,"rhodium","Rh", 102.905502, 0.2)
palladium = Element( 46,"palladium","Pd", 106.421, 0.163)
silver = Element( 47,"silver","Ag", 107.86822, 0.172)
cadmium = Element( 48,"cadmium","Cd", 112.4118, 0.158)
indium = Element( 49,"indium","In", 114.8183, 0.193)
tin = Element( 50,"tin","Sn", 118.7107, 0.217)
antimony = Element( 51,"antimony","Sb", 121.7601, 0.2)
tellurium = Element( 52,"tellurium","Te", 127.603, 0.206)
iodine = Element( 53,"iodine","I", 126.904473, 0.198)
xenon = Element( 54,"xenon","Xe", 131.2936, 0.216)
cesium = Element( 55,"cesium","Cs", 132.90545192, 0.21)
barium = Element( 56,"barium","Ba", 137.3277, 0.2)
lanthanum = Element( 57,"lanthanum","La", 138.905477, 0.2)
cerium = Element( 58,"cerium","Ce", 140.1161, 0.2)
praseodymium = Element( 59,"praseodymium","Pr", 140.907652, 0.2)
neodymium = Element( 60,"neodymium","Nd", 144.2423, 0.2)
promethium = Element( 61,"promethium","Pm", 145, 0.2)
samarium = Element( 62,"samarium","Sm", 150.362, 0.2)
europium = Element( 63,"europium","Eu", 151.9641, 0.2)
gadolinium = Element( 64,"gadolinium","Gd", 157.253, 0.2)
terbium = Element( 65,"terbium","Tb", 158.925352, 0.2)
dysprosium = Element( 66,"dysprosium","Dy", 162.5001, 0.2)
holmium = Element( 67,"holmium","Ho", 164.930322, 0.2)
erbium = Element( 68,"erbium","Er", 167.2593, 0.2)
thulium = Element( 69,"thulium","Tm", 168.934212, 0.2)
ytterbium = Element( 70,"ytterbium","Yb", 173.043, 0.2)
lutetium = Element( 71,"lutetium","Lu", 174.9671, 0.2)
hafnium = Element( 72,"hafnium","Hf", 178.492, 0.2)
tantalum = Element( 73,"tantalum","Ta", 180.947882, 0.2)
tungsten = Element( 74,"tungsten","W", 183.841, 0.2)
rhenium = Element( 75,"rhenium","Re", 186.2071, 0.2)
osmium = Element( 76,"osmium","Os", 190.233, 0.2)
iridium = Element( 77,"iridium","Ir", 192.2173, 0.2)
platinum = Element( 78,"platinum","Pt", 195.0849, 0.172)
gold = Element( 79,"gold","Au", 196.9665694, 0.166)
mercury = Element( 80,"mercury","Hg", 200.592, 0.155)
thallium = Element( 81,"thallium","Tl", 204.38332, 0.196)
lead = Element( 82,"lead","Pb", 207.21, 0.202)
bismuth = Element( 83,"bismuth","Bi", 208.980401, 0.2)
polonium = Element( 84,"polonium","Po", 209, 0.2)
astatine = Element( 85,"astatine","At", 210, 0.2)
radon = Element( 86,"radon","Rn", 222.018, 0.2)
francium = Element( 87,"francium","Fr", 223, 0.2)
radium = Element( 88,"radium","Ra", 226, 0.2)
actinium = Element( 89,"actinium","Ac", 227, 0.2)
thorium = Element( 90,"thorium","Th", 232.038062, 0.2)
protactinium = Element( 91,"protactinium","Pa", 231.035882, 0.2)
uranium = Element( 92,"uranium","U", 238.028913, 0.186)
neptunium = Element( 93,"neptunium","Np", 237, 0.2)
plutonium = Element( 94,"plutonium","Pu", 244, 0.2)
americium = Element( 95,"americium","Am", 243, 0.2)
curium = Element( 96,"curium","Cm", 247, 0.2)
berkelium = Element( 97,"berkelium","Bk", 247, 0.2)
californium = Element( 98,"californium","Cf", 251, 0.2)
einsteinium = Element( 99,"einsteinium","Es", 252, 0.2)
fermium = Element(100,"fermium","Fm", 257, 0.2)
mendelevium = Element(101,"mendelevium","Md", 258, 0.2)
nobelium = Element(102,"nobelium","No", 259, 0.2)
lawrencium = Element(103,"lawrencium","Lr", 262, 0.2)
rutherfordium = Element(104,"rutherfordium","Rf", 261, 0.2)
dubnium = Element(105,"dubnium","Db", 262, 0.2)
seaborgium = Element(106,"seaborgium","Sg", 266, 0.2)
bohrium = Element(107,"bohrium","Bh", 264, 0.2)
hassium = Element(108,"hassium","Hs", 269, 0.2)
meitnerium = Element(109,"meitnerium","Mt", 268, 0.2)
darmstadtium = Element(110,"darmstadtium","Ds", 281, 0.2)
roentgenium = Element(111,"roentgenium","Rg", 272, 0.2)
ununbium = Element(112,"ununbium","Uub", 285, 0.2)
ununtrium = Element(113,"ununtrium","Uut", 284, 0.2)
ununquadium = Element(114,"ununquadium","Uuq", 289, 0.2)
ununpentium = Element(115,"ununpentium","Uup", 288, 0.2)
ununhexium = Element(116,"ununhexium","Uuh", 292, 0.2)
# Aliases to recognize common alternative spellings. Both the '==' and 'is'
# operators work with either name, since each alias is the same object.
sulphur = sulfur
aluminium = aluminum
virtual_site = virtual
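# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal demonstration of the lookup helpers defined above; every
# name referenced here is defined in this module.
if __name__ == '__main__':
    assert Element.getBySymbol('C') is carbon        # symbol lookup is case-insensitive
    assert Element.getBySymbol('na') is sodium
    assert Element.getByAtomicNumber(1) is hydrogen  # the lighter isotope is canonical
    assert Element.getByMass(12.0) is carbon         # closest-mass lookup
    assert get_by_symbol('S') is sulphur is sulfur   # backward-compat helper and alias
    print('element lookup sketch: all assertions passed')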
|
casawa/mdtraj
|
mdtraj/core/element.py
|
Python
|
lgpl-2.1
| 15,631
|
[
"MDTraj",
"OpenMM"
] |
ee91a3ffc21c7a06e42ea6a4845c0d892f31fb29dcf1fe8583a7b999d8d06430
|
#!/usr/bin/env python
"""
An example of how to modify the data visualized via an interactive dialog.
A dialog is created via `TraitsUI
<http://code.enthought.com/projects/traits/>`_ from an object (MyModel).
Some attributes of the object are represented on the dialog: first a
Mayavi scene that will host our visualization, and two parameters that
control the data plotted.
A curve is plotted in the embedded scene using the associated
mlab.plot3d function. The visualization object created is stored
as an attribute on the main MyModel object, so it can be modified in
place later.
When the `n_meridional` and `n_longitudinal` attributes are modified, e.g. via
the sliders on the dialog, the curve is recomputed, and the
visualization is updated by modifying the stored plot object in place
(see :ref:`mlab-animating-data`).
This example is discussed in detail in the section
:ref:`embedding_mayavi_traits`.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from numpy import arange, pi, cos, sin
from traits.api import HasTraits, Range, Instance, \
on_trait_change
from traitsui.api import View, Item, Group
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
dphi = pi/1000.
phi = arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
def curve(n_mer, n_long):
mu = phi*n_mer
x = cos(mu) * (1 + cos(n_long * mu/n_mer)*0.5)
y = sin(mu) * (1 + cos(n_long * mu/n_mer)*0.5)
z = 0.5 * sin(n_long*mu/n_mer)
t = sin(mu)
return x, y, z, t
class MyModel(HasTraits):
    n_meridional = Range(0, 30, 6)  # mode='spinner'
    n_longitudinal = Range(0, 30, 11)  # mode='spinner'
scene = Instance(MlabSceneModel, ())
plot = Instance(PipelineBase)
# When the scene is activated, or when the parameters are changed, we
# update the plot.
@on_trait_change('n_meridional,n_longitudinal,scene.activated')
def update_plot(self):
x, y, z, t = curve(self.n_meridional, self.n_longitudinal)
if self.plot is None:
self.plot = self.scene.mlab.plot3d(x, y, z, t,
tube_radius=0.025, colormap='Spectral')
else:
self.plot.mlab_source.set(x=x, y=y, z=z, scalars=t)
# The layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
Group(
'_', 'n_meridional', 'n_longitudinal',
),
resizable=True,
)
my_model = MyModel()
my_model.configure_traits()
|
dmsurti/mayavi
|
examples/mayavi/interactive/mlab_interactive_dialog.py
|
Python
|
bsd-3-clause
| 2,716
|
[
"Mayavi"
] |
5e7bfd13064b69d6d1cd3e636fb7df00e624509b00008f06187dd9a5e72d1aa2
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robotide.lib.robot.utils import NormalizedDict
from .criticality import Criticality
from .stats import TagStat, CombinedTagStat
from .tags import TagPatterns
class TagStatistics(object):
"""Container for tag statistics.
"""
def __init__(self, combined_stats):
#: Dictionary, where key is the name of the tag as a string and value
#: is an instance of :class:`~robot.model.stats.TagStat`.
self.tags = NormalizedDict(ignore=['_'])
        #: List of :class:`~robot.model.stats.CombinedTagStat` objects,
        #: one for each combined statistic.
self.combined = combined_stats
def visit(self, visitor):
visitor.visit_tag_statistics(self)
def __iter__(self):
return iter(sorted(self.tags.values() + self.combined))
class TagStatisticsBuilder(object):
def __init__(self, criticality=None, included=None, excluded=None,
combined=None, docs=None, links=None):
self._included = TagPatterns(included)
self._excluded = TagPatterns(excluded)
self._info = TagStatInfo(criticality, docs, links)
self.stats = TagStatistics(self._info.get_combined_stats(combined))
def add_test(self, test):
self._add_tags_to_statistics(test)
self._add_to_combined_statistics(test)
def _add_tags_to_statistics(self, test):
for tag in test.tags:
if self._is_included(tag):
if tag not in self.stats.tags:
self.stats.tags[tag] = self._info.get_stat(tag)
self.stats.tags[tag].add_test(test)
def _is_included(self, tag):
if self._included and not self._included.match(tag):
return False
return not self._excluded.match(tag)
def _add_to_combined_statistics(self, test):
for comb in self.stats.combined:
if comb.match(test.tags):
comb.add_test(test)
class TagStatInfo(object):
def __init__(self, criticality=None, docs=None, links=None):
self._criticality = criticality or Criticality()
self._docs = [TagStatDoc(*doc) for doc in docs or []]
self._links = [TagStatLink(*link) for link in links or []]
def get_stat(self, tag):
return TagStat(tag, self.get_doc(tag), self.get_links(tag),
self._criticality.tag_is_critical(tag),
self._criticality.tag_is_non_critical(tag))
def get_combined_stats(self, combined=None):
return [self.get_combined_stat(*comb) for comb in combined or []]
def get_combined_stat(self, pattern, name=None):
name = name or pattern
return CombinedTagStat(pattern, name, self.get_doc(name),
self.get_links(name))
def get_doc(self, tag):
return ' & '.join(doc.text for doc in self._docs if doc.match(tag))
def get_links(self, tag):
return [link.get_link(tag) for link in self._links if link.match(tag)]
class TagStatDoc(object):
def __init__(self, pattern, doc):
self._matcher = TagPatterns(pattern)
self.text = doc
def match(self, tag):
return self._matcher.match(tag)
class TagStatLink(object):
    _match_pattern_tokenizer = re.compile(r'(\*|\?+)')
def __init__(self, pattern, link, title):
self._regexp = self._get_match_regexp(pattern)
self._link = link
self._title = title.replace('_', ' ')
def match(self, tag):
return self._regexp.match(tag) is not None
def get_link(self, tag):
match = self._regexp.match(tag)
if not match:
return None
link, title = self._replace_groups(self._link, self._title, match)
return link, title
    def _replace_groups(self, link, title, match):
        for index, group in enumerate(match.groups()):
            placeholder = '%%%d' % (index + 1)
            link = link.replace(placeholder, group)
            title = title.replace(placeholder, group)
        return link, title
def _get_match_regexp(self, pattern):
pattern = '^%s$' % ''.join(self._yield_match_pattern(pattern))
return re.compile(pattern, re.IGNORECASE)
def _yield_match_pattern(self, pattern):
for token in self._match_pattern_tokenizer.split(pattern):
if token.startswith('?'):
yield '(%s)' % ('.'*len(token))
elif token == '*':
yield '(.*)'
else:
yield re.escape(token)
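# --- Hedged usage sketch (added; not part of the original module) ---
# TagStatLink converts simple patterns into regexps: '*' captures any
# text and a run of '?' captures exactly that many characters; captured
# groups are substituted into the link and title via %1, %2, ...
# The tracker URL below is illustrative only.
if __name__ == '__main__':
    link = TagStatLink('issue-*', 'http://tracker.example/%1', 'Issue_%1')
    print(link.get_link('issue-42'))  # ('http://tracker.example/42', 'Issue 42')
    print(link.get_link('smoke'))     # None: the pattern does not match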
|
fingeronthebutton/RIDE
|
src/robotide/lib/robot/model/tagstatistics.py
|
Python
|
apache-2.0
| 5,149
|
[
"VisIt"
] |
b57d669e675e49f97d321a3339cdae420eb4405bcd0bb087bd9bf9f22829d0d4
|
print('Will plot single galaxy luminosity density profiles')
import astropy.table as table
from defcuts import *
from def_get_mags import *
from my_def_plots import *
from defflags import many_flags
import matplotlib.pyplot as plt
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/single_plot/'
datatab = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs.fits')
bands=['g', 'r', 'i','z', 'y']
parm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
#get rid of galaxies outside the magnitude cuts
mincut=0.1
maxcut=''
cutdatag, crange=out_cut(datatab, bands[0], 'mag_aperture00',mincut, maxcut)
cutdatai, crange=out_cut(cutdatag, bands[2], 'mag_aperture00',mincut, maxcut)
cutdatar, crange=out_cut(cutdatai, bands[1], 'mag_aperture00',mincut, maxcut)
cutdatay, crange=out_cut(cutdatar, bands[4], 'mag_aperture00', mincut, maxcut)
cutdataz, crange=out_cut(cutdatay, bands[3], 'mag_aperture00',mincut, maxcut)
ne=[199.99, 99.99]
cutdata1=not_cut(cutdataz, bands[1], 'mag_aperture00', ne)
cutdata=not_cut(cutdata1, bands[0], 'mag_aperture00', ne)
#get rid of flagged galaxies
newdata=many_flags(cutdata, parm, 'i') #I think flags are only in band i
Naps=len(aperture)
objID=newdata['object_id']
redshift=newdata['Z']
Ndat=len(redshift)
DM= get_zdistmod(newdata, 'Z')
kcorrect=get_kcorrect2(newdata,'mag_aperture0', '_err', bands, '0', 'hsc_filters.dat', redshift)
#for n in range(0, Ndat,10):
for n in range(0,Ndat):
#this goes through every galaxy
name=objID[n]
name=str(name)
LG=[]
LR=[]
LI=[]
LZ=[]
LY=[]
radkpc=aper_and_comov(aperture, redshift[n])
for a in range(0, Naps):
#this goes through every aperture
ns=str(a)
print(ns)
#get magnitude
absg, absr, absi, absz, absy= abs_mag(newdata[n], 'mag_aperture0', kcorrect, DM[n], bands, ns, n)
Lumg, Lumr, Lumi, Lumz, Lumy=abs2lum(absg, absr, absi, absz, absy)
Lg, Lr, Li, Lz, Ly=lumdensity(Lumg, Lumr, Lumi, Lumz, Lumy, radkpc[a])
LG.append(Lg)
LR.append(Lr)
LI.append(Li)
LZ.append(Lz)
LY.append(Ly)
# break
#creating luminosity densities for the apertures at each band
print('Galaxy # ', n)
lum_comov_plot(LG, LR, LI, LZ, LY, radkpc, name, outdir)
#will eventually need comoving
|
anewmark/galaxy_dark_matter
|
not currently in use/call_get_mags.py
|
Python
|
mit
| 2,488
|
[
"Galaxy"
] |
f33dd84b7f585ea14655ae34e6a4ef7f60729ea6fc315a8045b7a4d50690b50a
|
NEW_REQUEST_EMAIL_TEXT="""
Hello LiPAD Admins,
A new {} request has been submitted. You can view the request using the following link:
{}
"""
NEW_REQUEST_EMAIL_HTML="""
<p>Hello LiPAD Admins,</p>
<p>A new {} request has been submitted. You can view the request using the following link:</p>
<p><a rel="nofollow" target="_blank" href="{}">{}</a></p>
"""
VERIFICATION_EMAIL_TEXT="""
Dear {},
Please paste the following URL in your browser to verify your email and complete your Data Request Registration.
https://{}
If clicking does not work, try copying and pasting the link to your browser's address bar.
For inquiries, you can contact us at {}.
Regards,
LiPAD Team
"""
VERIFICATION_EMAIL_HTML= """
<p>Dear <strong>{}</strong>,</p>
<p>Please click on the following link to verify your email and complete your Data Request Registration.</p>
<p><a rel="nofollow" target="_blank" href="https://{}">https://{}</a></p>
<p>If clicking does not work, try copying and pasting the link to your browser's address bar.</p>
<p>For inquiries, you can contact us at <a href="mailto:{}" target="_top">{}</a></p>
</br>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
PROFILE_APPROVAL_TEXT= """
Dear {},
Your account registration for LiPAD was approved.
You will now be able to log in using the following log-in credentials:
username: {}
Before you are able to log in to LiPAD, first visit https://ssp.dream.upd.edu.ph/?action=sendtoken and follow the instructions to set a password for your account.
You will be able to edit your account details by logging in and going to the following link:
{}
To download DTMs, DSMs, Classified LAZ and Orthophotos, please proceed to http://lipad.dream.upd.edu.ph/maptiles after logging in.
To download Flood Hazard Maps, Resource Layers and other datasets, please proceed to http://lipad.dream.upd.edu.ph/layers/.
If you have any questions, you can contact us at {}.
Regards,
LiPAD Team
"""
PROFILE_APPROVAL_HTML = """
<p>Dear <strong>{}</strong>,</p>
<p>Your account registration for LiPAD was approved. You will now be able to log in using the following log-in credentials:</p>
<p>username: <strong>{}</strong></p>
<p>Before you are able to log in to LiPAD, follow the instructions in this <a href="https://ssp.dream.upd.edu.ph/?action=sendtoken">link</a> to set or reset a password for your account.</p></br>
<p>You will be able to edit your account details by logging in and going to the following link:</p>
{}
</br>
<p>To download DTMs, DSMs, Classified LAZ and Orthophotos, please proceed to <a href="http://lipad.dream.upd.edu.ph/maptiles">Data Tiles Section</a> under Data Store after logging in.</p>
<p>To download Flood Hazard Maps, Resource Layers and other datasets, please proceed to <a href="http://lipad.dream.upd.edu.ph/layers/">Layers Section</a> under Data Store.</p>
<p>If you have any questions, you can contact us at <a href="mailto:{}" target="_top">{}</a></p>
</br>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
PROFILE_REJECTION_TEXT="""
Dear {},
Your account registration for LiPAD was not approved.
Reason: {}
{}
If you have further questions, you can contact us at {}.
Regards,
LiPAD Team
"""
PROFILE_REJECTION_HTML="""
<p>Dear <strong>{}</strong>,</p>
<p>Your account registration for LiPAD was not approved.</p>
<p>Reason: {} <br/>
{}</p>
<p>If you have further questions, you can contact us at <a href="mailto:{}" target="_top">{}</a></p>
</br>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_APPROVAL_TEXT="""
Dear {},
Your current data request for LiPAD was approved.
To download DTMs, DSMs, Classified LAZ and Orthophotos, please proceed to http://lipad.dream.upd.edu.ph/maptiles after logging in.
To download Flood Hazard Maps, Resource Layers and other datasets, please proceed to http://lipad.dream.upd.edu.ph/layers/.
If you have any questions, you can contact us at {}.
Regards,
LiPAD Team
"""
DATA_APPROVAL_HTML= """
<p>Dear <strong>{}</strong>,</p>
<p>Your current data request in LiPAD was approved.</p>
<p>To download DTMs, DSMs, Classified LAZ and Orthophotos, please proceed to <a href="http://lipad.dream.upd.edu.ph/maptiles">Data Tiles Section</a> under Data Store after logging in.</p>
<p>To download Flood Hazard Maps, Resource Layers and other datasets, please proceed to <a href="http://lipad.dream.upd.edu.ph/layers/">Layers Section</a> under Data Store.</p>
<p>If you have any questions, you can contact us at <a href="mailto:{}" target="_top">{}</a></p>
</br>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_REJECTION_TEXT="""
Dear {},
Your data request for LiPAD was not approved.
Reason: {}
{}
If you have further questions, you can contact us at {}.
Regards,
LiPAD Team
"""
DATA_REJECTION_HTML= """
<p>Dear <strong>{}</strong>,</p>
<p>Your data request for LiPAD was not approved.</p>
<p>Reason: {} <br/>
{}</p>
<p>If you have further questions, you can contact us at <a href="mailto:{}" target="_top">{}</a></p>
</br>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_SUC_REQUEST_NOTIFICATION_TEXT="""
Greetings, {} {}!
We are informing you that a data requester has requested data which is too big for an FTP transfer to handle.
We are asking your permission to forward the data to your team and let the requester retrieve the data from you.
Listed below are the details of the data requested:
Name of requester: {} {}
Organization: {}
Email Address: {}
Project Summary: {}
Type of Data Requested: {}
Intended Use of the Data: {}
We hope to hear your response at the soonest possible time. Thank you!
Regards,
LiPAD Team
"""
DATA_SUC_REQUEST_NOTIFICATION_HTML="""
<p>Greetings, {} {}!</p>
<p>We are informing you that a data requester has requested data which is too big for an FTP transfer to handle.
We are asking your permission to forward the data to your team and let the requester retrieve the data from you. </p>
<br/>
<p>Listed below are the details of the data requested:<br />
Name of requester: {} {}<br />
Organization: {}<br />
Email Address: {}<br />
Project Summary: {}<br />
Type of Data Requested: {}<br />
Intended Use of the Data: {}</p>
<p>We hope to hear your response at the soonest possible time. Thank you!</p>
<br />
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_SUC_JURISDICTION_TEXT ="""
Greetings, {} {}!
Here is the link to the user's uploaded area of interest: {}
We have already notified the user about forwarding the request to you. Thank you very much for your assistance.
Regards,
LiPAD Team
"""
DATA_SUC_JURISDICTION_HTML ="""
<p>Greetings, {} {}!</p>
<br />
<p>Here is the link to the user's uploaded area of interest: <a href="{}">{}</a></p>
<br />
<p>We have already notified the user about forwarding the request to you. Thank you very much for your assistance.</p>
<br />
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_USER_FORWARD_NOTIFICATION_TEXT="""
Greetings, {} {}!
Your request has now been forwarded to the {} Phil-LiDAR 1 office. Please send your follow-up queries to their office instead.
Regards,
LiPAD Team
"""
DATA_USER_FORWARD_NOTIFICATION_HTML="""
<p>Greetings, {} {}!</p>
<br />
<p>Your data request has now been forwarded to the {} Phil-LiDAR 1 office. Please send your follow-up queries to their office instead.</p>
<br />
<p>Regards,</p>
<p>LiPAD Team</p>
"""
DATA_USER_PRE_FORWARD_NOTIFICATION_TEXT="""
Greetings, {} {}!
We are writing to inform you that the estimated size of your requested data exceeds 10 GB (gigabytes), so it may be more convenient for you to visit the office to copy the files.
As such, we would like to know which option you prefer: to copy the files at the UP Diliman office of Phil-LiDAR 1, or to visit the partner SUC assigned to your area of interest.
The assigned SUC for your area of interest is {} ({}) Phil-LiDAR 1 under {} {}.
We hope to hear your response at the soonest possible time. Thank you!
Regards,
LiPAD Team
"""
DATA_USER_PRE_FORWARD_NOTIFICATION_HTML="""
<p>Greetings, {} {}!</p>
<p>We would like to inform you that the estimated size of your requested data exceeds 10 GB (gigabytes), so it may be more convenient for you to visit a Phil-LiDAR 1 office to copy the files.
As such, we would like to know which option you prefer: to copy the files at the UP Diliman office of Phil-LiDAR 1, or to visit the partner SUC assigned to your area of interest.
The assigned SUC for your area of interest is {} ({}) Phil-LiDAR 1 under {} {}.</p>
<p>We hope to hear your response at the soonest possible time. Thank you!</p>
<p>Regards,</p>
<p>LiPAD Team</p>
"""
|
PhilLidar-DAD/geonode
|
geonode/datarequests/email_utils.py
|
Python
|
gpl-3.0
| 9,512
|
[
"VisIt"
] |
ce988d91919c0754621058d486811aa3b0962bff7482fde94ca4dd33d0746679
|
input_name = '../examples/navier_stokes/navier_stokes2d.py'
output_name = 'test_navier_stokes2d.vtk'
from tests_basic import TestInput
class Test(TestInput):
pass
|
RexFuzzle/sfepy
|
tests/test_input_navier_stokes2d.py
|
Python
|
bsd-3-clause
| 168
|
[
"VTK"
] |
c2ec8011d57d289601045b1d7da51a4580a2287c051885276e7509d4f638131d
|
import os
import pathlib
import tempfile
import time
import tracemalloc
import dufte
import matplotlib.pyplot as plt
import meshzoo
import numpy as np
import meshio
def generate_triangular_mesh():
p = pathlib.Path("sphere.xdmf")
    if p.is_file():
mesh = meshio.read(p)
else:
points, cells = meshzoo.icosa_sphere(300)
mesh = meshio.Mesh(points, {"triangle": cells})
mesh.write(p)
return mesh
def generate_tetrahedral_mesh():
"""Generates a fairly large mesh."""
if pathlib.Path.is_file("cache.xdmf"):
mesh = meshio.read("cache.xdmf")
else:
import pygalmesh
s = pygalmesh.Ball([0, 0, 0], 1.0)
mesh = pygalmesh.generate_mesh(s, cell_size=2.0e-2, verbose=True)
# mesh = pygalmesh.generate_mesh(s, cell_size=1.0e-1, verbose=True)
mesh.cells = {"tetra": mesh.cells["tetra"]}
mesh.point_data = []
mesh.cell_data = {"tetra": {}}
mesh.write("cache.xdmf")
return mesh
def plot_speed(names, elapsed_write, elapsed_read):
plt.style.use(dufte.style)
names = np.asarray(names)
elapsed_write = np.asarray(elapsed_write)
elapsed_read = np.asarray(elapsed_read)
fig, ax = plt.subplots(1, 2, figsize=(12, 8))
idx = np.argsort(elapsed_write)[::-1]
ax[0].barh(range(len(names)), elapsed_write[idx], align="center")
ax[0].set_yticks(range(len(names)))
ax[0].set_yticklabels(names[idx])
ax[0].set_xlabel("time (s)")
ax[0].set_title("write")
ax[0].grid()
idx = np.argsort(elapsed_read)[::-1]
ax[1].barh(range(len(names)), elapsed_read[idx], align="center")
ax[1].set_yticks(range(len(names)))
ax[1].set_yticklabels(names[idx])
ax[1].set_xlabel("time (s)")
ax[1].set_title("read")
ax[1].grid()
fig.tight_layout()
# plt.show()
fig.savefig("performance.svg", transparent=True, bbox_inches="tight")
plt.close()
def plot_file_sizes(names, file_sizes, mem_size):
idx = np.argsort(file_sizes)
file_sizes = [file_sizes[i] for i in idx]
names = [names[i] for i in idx]
plt.figure(figsize=(8, 8))
ax = plt.gca()
y_pos = np.arange(len(file_sizes))
ax.barh(y_pos, file_sizes, align="center")
#
ylim = ax.get_ylim()
plt.plot(
[mem_size, mem_size], [-2, len(file_sizes) + 2], "C3", linewidth=2.0, zorder=0
)
ax.set_ylim(ylim)
#
ax.set_yticks(y_pos)
ax.set_yticklabels(names)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel("file size [MB]")
ax.set_title("file sizes")
plt.grid()
# plt.show()
plt.savefig("filesizes.svg", transparent=True, bbox_inches="tight")
plt.close()
def plot_memory_usage(names, peak_memory_write, peak_memory_read, mem_size):
names = np.asarray(names)
peak_memory_write = np.asarray(peak_memory_write)
peak_memory_read = np.asarray(peak_memory_read)
fig, ax = plt.subplots(1, 2, figsize=(12, 8))
idx = np.argsort(peak_memory_write)[::-1]
ax[0].barh(range(len(names)), peak_memory_write[idx], align="center")
ax[0].set_yticks(range(len(names)))
ax[0].set_yticklabels(names[idx])
ax[0].set_xlabel("peak memory [MB]")
ax[0].set_title("write")
ax[0].grid()
# plot memsize of mesh
ylim = ax[0].get_ylim()
ax[0].plot(
[mem_size, mem_size], [-2, len(names) + 2], "C3", linewidth=2.0, zorder=0
)
ax[0].set_ylim(ylim)
idx = np.argsort(peak_memory_read)[::-1]
ax[1].barh(range(len(names)), peak_memory_read[idx], align="center")
ax[1].set_yticks(range(len(names)))
ax[1].set_yticklabels(names[idx])
ax[1].set_xlabel("peak memory [MB]")
ax[1].set_title("read")
ax[1].grid()
# plot memsize of mesh
ylim = ax[1].get_ylim()
ax[1].plot(
[mem_size, mem_size], [-2, len(names) + 2], "C3", linewidth=2.0, zorder=0
)
ax[1].set_ylim(ylim)
fig.tight_layout()
# plt.show()
fig.savefig("memory.svg", transparent=True, bbox_inches="tight")
plt.close()
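def measure_write_read(write, read, filenames, mesh):
    """Hedged sketch (added): one way to time and memory-profile a
    single write/read pair with the time/tracemalloc/os imports above.
    The helper name is ours; the original benchmark loop appears further
    down in this file."""
    # write: time it and record the peak allocation in MB
    tracemalloc.start()
    t = time.time()
    write(filenames[0], mesh)
    elapsed_write = time.time() - t
    peak_memory_write = tracemalloc.get_traced_memory()[1] / 1024.0 ** 2
    tracemalloc.stop()
    # read it back, measuring the same way
    tracemalloc.start()
    t = time.time()
    read(filenames[0])
    elapsed_read = time.time() - t
    peak_memory_read = tracemalloc.get_traced_memory()[1] / 1024.0 ** 2
    tracemalloc.stop()
    # total size on disk in MB (some formats write several files)
    file_size = sum(os.path.getsize(f) for f in filenames) / 1024.0 ** 2
    return elapsed_write, elapsed_read, file_size, peak_memory_write, peak_memory_read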
def read_write(plot=False):
# mesh = generate_tetrahedral_mesh()
mesh = generate_triangular_mesh()
print(mesh)
mem_size = mesh.points.nbytes + mesh.cells[0].data.nbytes
mem_size /= 1024.0**2
print(f"mem_size: {mem_size:.2f} MB")
formats = {
"Abaqus": (meshio.abaqus.write, meshio.abaqus.read, ["out.inp"]),
"Ansys (ASCII)": (
lambda f, m: meshio.ansys.write(f, m, binary=False),
meshio.ansys.read,
["out.ans"],
),
# "Ansys (binary)": (
# lambda f, m: meshio.ansys.write(f, m, binary=True),
# meshio.ansys.read,
# ["out.ans"],
# ),
"AVS-UCD": (meshio.avsucd.write, meshio.avsucd.read, ["out.ucd"]),
# "CGNS": (meshio.cgns.write, meshio.cgns.read, ["out.cgns"]),
"Dolfin-XML": (meshio.dolfin.write, meshio.dolfin.read, ["out.xml"]),
"Exodus": (meshio.exodus.write, meshio.exodus.read, ["out.e"]),
# "FLAC3D": (meshio.flac3d.write, meshio.flac3d.read, ["out.f3grid"]),
"Gmsh 4.1 (ASCII)": (
lambda f, m: meshio.gmsh.write(f, m, binary=False),
meshio.gmsh.read,
["out.msh"],
),
"Gmsh 4.1 (binary)": (
lambda f, m: meshio.gmsh.write(f, m, binary=True),
meshio.gmsh.read,
["out.msh"],
),
"MDPA": (meshio.mdpa.write, meshio.mdpa.read, ["out.mdpa"]),
"MED": (meshio.med.write, meshio.med.read, ["out.med"]),
"Medit": (meshio.medit.write, meshio.medit.read, ["out.mesh"]),
"MOAB": (meshio.h5m.write, meshio.h5m.read, ["out.h5m"]),
"Nastran": (meshio.nastran.write, meshio.nastran.read, ["out.bdf"]),
"Netgen": (meshio.netgen.write, meshio.netgen.read, ["out.vol"]),
"OFF": (meshio.off.write, meshio.off.read, ["out.off"]),
"Permas": (meshio.permas.write, meshio.permas.read, ["out.dato"]),
"PLY (binary)": (
lambda f, m: meshio.ply.write(f, m, binary=True),
meshio.ply.read,
["out.ply"],
),
"PLY (ASCII)": (
lambda f, m: meshio.ply.write(f, m, binary=False),
meshio.ply.read,
["out.ply"],
),
"STL (binary)": (
lambda f, m: meshio.stl.write(f, m, binary=True),
meshio.stl.read,
["out.stl"],
),
"STL (ASCII)": (
lambda f, m: meshio.stl.write(f, m, binary=False),
meshio.stl.read,
["out.stl"],
),
# "TetGen": (meshio.tetgen.write, meshio.tetgen.read, ["out.node", "out.ele"],),
"VTK (binary)": (
lambda f, m: meshio.vtk.write(f, m, binary=True),
meshio.vtk.read,
["out.vtk"],
),
"VTK (ASCII)": (
lambda f, m: meshio.vtk.write(f, m, binary=False),
meshio.vtk.read,
["out.vtk"],
),
"VTU (binary, uncompressed)": (
lambda f, m: meshio.vtu.write(f, m, binary=True, compression=None),
meshio.vtu.read,
["out.vtu"],
),
"VTU (binary, zlib)": (
lambda f, m: meshio.vtu.write(f, m, binary=True, compression="zlib"),
meshio.vtu.read,
["out.vtu"],
),
"VTU (binary, LZMA)": (
lambda f, m: meshio.vtu.write(f, m, binary=True, compression="lzma"),
meshio.vtu.read,
["out.vtu"],
),
"VTU (ASCII)": (
lambda f, m: meshio.vtu.write(f, m, binary=False),
meshio.vtu.read,
["out.vtu"],
),
"Wavefront .obj": (meshio.obj.write, meshio.obj.read, ["out.obj"]),
# "wkt": ".wkt",
"XDMF (binary)": (
lambda f, m: meshio.xdmf.write(f, m, data_format="Binary"),
meshio.xdmf.read,
["out.xdmf", "out0.bin", "out1.bin"],
),
"XDMF (HDF, GZIP)": (
lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression="gzip"),
meshio.xdmf.read,
["out.xdmf", "out.h5"],
),
"XDMF (HDF, uncompressed)": (
lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression=None),
meshio.xdmf.read,
["out.xdmf", "out.h5"],
),
"XDMF (XML)": (
lambda f, m: meshio.xdmf.write(f, m, data_format="XML"),
meshio.xdmf.read,
["out.xdmf"],
),
}
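    # Each entry maps a display name to a (writer, reader, filenames) tuple;
    # formats that produce several files (e.g. XDMF with sidecar .bin/.h5
    # files) list every output so their sizes can be summed below.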
# formats = {
# # "VTK (ASCII)": formats["VTK (ASCII)"],
# # "VTK (binary)": formats["VTK (binary)"],
# # "VTU (ASCII)": formats["VTU (ASCII)"],
# # "VTU (binary)": formats["VTU (binary)"],
# # "Gmsh 4.1 (binary)": formats["Gmsh 4.1 (binary)"],
# # "FLAC3D": formats["FLAC3D"],
# "MDPA": formats["MDPA"],
# }
# max_key_length = max(len(key) for key in formats)
elapsed_write = []
elapsed_read = []
file_sizes = []
peak_memory_write = []
peak_memory_read = []
print()
    print(
        "format                     "
        + "write (s)     "
        + "read (s)      "
        + "file size (MB) "
        + "write mem (MB) "
        + "read mem (MB)"
    )
print()
with tempfile.TemporaryDirectory() as directory:
directory = pathlib.Path(directory)
for name, (writer, reader, filenames) in formats.items():
filename = directory / filenames[0]
tracemalloc.start()
t = time.time()
writer(filename, mesh)
# snapshot = tracemalloc.take_snapshot()
elapsed_write.append(time.time() - t)
peak_memory_write.append(tracemalloc.get_traced_memory()[1])
tracemalloc.stop()
file_sizes.append(sum(os.stat(directory / f).st_size for f in filenames))
tracemalloc.start()
t = time.time()
reader(filename)
elapsed_read.append(time.time() - t)
peak_memory_read.append(tracemalloc.get_traced_memory()[1])
tracemalloc.stop()
print(
"{:<26} {:e} {:e} {:e} {:e} {:e}".format(
name,
elapsed_write[-1],
elapsed_read[-1],
file_sizes[-1] / 1024.0**2,
peak_memory_write[-1] / 1024.0**2,
peak_memory_read[-1] / 1024.0**2,
)
)
names = list(formats.keys())
# convert to MB
file_sizes = np.array(file_sizes)
file_sizes = file_sizes / 1024.0**2
peak_memory_write = np.array(peak_memory_write)
peak_memory_write = peak_memory_write / 1024.0**2
peak_memory_read = np.array(peak_memory_read)
peak_memory_read = peak_memory_read / 1024.0**2
if plot:
plot_speed(names, elapsed_write, elapsed_read)
plot_file_sizes(names, file_sizes, mem_size)
plot_memory_usage(names, peak_memory_write, peak_memory_read, mem_size)
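# A minimal sketch (editor's addition, not part of the original benchmark)
# of the measurement pattern used in read_write() above: wrap a single call
# in tracemalloc for its peak memory and in time.time() deltas for wall time.
def _measure(func, *args):
    tracemalloc.start()
    t = time.time()
    result = func(*args)
    elapsed = time.time() - t
    peak_bytes = tracemalloc.get_traced_memory()[1]
    tracemalloc.stop()
    return result, elapsed, peak_bytes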
if __name__ == "__main__":
read_write(plot=True)
|
nschloe/meshio
|
tests/performance.py
|
Python
|
mit
| 11,085
|
[
"VTK"
] |
6407bc1edd04fc0ea685a88a0eb8affbc334ca1e2f95d8b2d6cd8f68a74eca07
|
import random
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import cv2
import re, os, glob, pickle, shutil
import Config
class POM_room(object):
#def __init__(self,pom_file_path,parts_root_folder,img_index_list,n_parts,resize_pom = 4,cameras_list = range(7),HW_grid = (-1,-1),Sigma_factor = 2,with_templates = True):
def __init__(self,parts_root_folder= Config.parts_root_folder,with_templates = True):
# Config about POM templates
self.parts_root_folder = parts_root_folder
        self.n_parts = Config.n_parts  # Number of classifier classes, including the foreground class, which is the last one
self.pom_file_path = Config.pom_file_path
self.resize_pom = Config.resize_pom #Difference in ratio between dimensions for POM file and images saved (4 when images come from VGG)
self.cameras_list = Config.cameras_list
self.n_cams = len(self.cameras_list)
self.img_index_list = Config.img_index_list #images to use
self.image_path_format = self.parts_root_folder + 'c%d/%d.npy'
self.gaussian_params_path = self.parts_root_folder +'gaussian_params.txt'
        self.H_grid, self.W_grid = Config.H_grid, Config.W_grid  # Useful if a grid is defined
if with_templates:
#Size of parts compared to Sigma
self.Sigma_factor = Config.Sigma_factor
# Config about POM images
self.H,self.W = self.get_HW_from_img()
self.extract_templates()
def get_HW_from_img(self):
'''
Output : Shape of the images we are going to use as input.
'''
im = np.load(self.image_path_format%(0,self.img_index_list[0]))
H,W = im.shape[0:2]
return H,W
def extract_BB_coordinates(self,camera):
'''
In : camera id
Out : List of all bounding boxes coordinates on this view, as defined by the pom file.
'''
f = open(self.pom_file_path, 'r')
lines = f.readlines()
bounding_boxes =[]
current_object =1
for i,line in enumerate(lines):
if line.find('RECTANGLE %d'%camera) > -1:
bounding_boxes.append(self.parse_BB_from_line(line))
return bounding_boxes
def parse_BB_from_line(self,line):
'''
In : line string
        Out : coordinates of the box in the parsed line. Non-visible boxes get random zero-size coordinates, and all values are divided by resize_pom to match the resizing used in the background subtraction.
'''
resize = self.resize_pom
line_split = line.split(' ')
if line_split[3] == 'notvisible\n':
rand_H,rand_W = random.randint(0,self.H-1),random.randint(0,self.W-1)
return [rand_W,rand_H,rand_W,rand_H]
else:
            # plain int() replaces the deprecated np.int; behaviour is identical here
            return [int(line_split[3])/resize, int(line_split[4])/resize, int(line_split[5])/resize, int(line_split[6])/resize]
#Extract coordinates of BBs as in normal POM file
def extract_templates(self):
'''
Output: Array of shape (n_cameras*n_parts,n_boxes,4) which contains templates 2D coordinates in projection on each camera.
'''
N_cameras = len(self.cameras_list)
        H, W = self.H, self.W
n_parts = self.n_parts
bboxes_cam_list =[]
for cam in self.cameras_list:
bboxes_cam_list.append(self.extract_BB_coordinates(cam))
#Load the gaussian parameters
gauss_params = np.loadtxt(self.gaussian_params_path,dtype = 'float32')
gauss_params = gauss_params.reshape((n_parts-1,2,4))
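        # Editor's note: gauss_params[part, 0, :] appears to hold per-part
        # offsets (alpha) and gauss_params[part, 1, :] the spreads (sigma),
        # both in thousandths of the bounding-box size (hence /1000 below).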
templates_array =np.zeros((N_cameras*n_parts,len(bboxes_cam_list[0]),4),dtype = 'int32')
for i in range(0,len(bboxes_cam_list[0])):
for cam in self.cameras_list:
bboxes = bboxes_cam_list[cam]
bb_midx = (bboxes[i][3] + bboxes[i][1])/2
bb_midy = (bboxes[i][2] + bboxes[i][0])/2
bb_sizex = (bboxes[i][3] - bboxes[i][1])
bb_sizey = (bboxes[i][2] - bboxes[i][0])
for part in range(n_parts-1):
alphax = gauss_params[part,0,0]
alphay = gauss_params[part,0,1]
sigmax = gauss_params[part,1,0]
sigmay = gauss_params[part,1,1]
# Compute coordinates of new bb
x0 = bb_midx - (alphax*bb_sizex)/1000 - (sigmax*self.Sigma_factor*bb_sizex)/1000
y0 = bb_midy - (alphay*bb_sizey)/1000 - (sigmay*self.Sigma_factor*bb_sizey)/1000
x1 = bb_midx - (alphax*bb_sizex)/1000 + (sigmax*self.Sigma_factor*bb_sizex)/1000
y1 = bb_midy - (alphay*bb_sizey)/1000 + (sigmay*self.Sigma_factor*bb_sizey)/1000
# Crop coordinates to stay inside image
x0 = max(x0,0)
y0 = max(y0,0)
x1 = min(x1,H-1)
y1 = min(y1,W-1)
                    if (x1 - x0) > H/150.0 and (y1 - y0) > W/150.0:  # Arbitrary criterion to prevent too-small BBs
templates_array[n_parts*cam + part,i,:] = np.asarray([x0,y0,x1,y1])
else:
rand_H,rand_W = random.randint(0,H-1),random.randint(0,W-1)
templates_array[n_parts*cam + part,i,:] = np.asarray([rand_H,rand_W,rand_H,rand_W])
#now add full box in last position
x0 = min(max(bboxes[i][1],0),H-1)
y0 = min(max(bboxes[i][0],0),W-1)
x1 = min(max(bboxes[i][3],0),H-1)
y1 = min(max(bboxes[i][2],0),W-1)
templates_array[n_parts*cam + n_parts - 1,i,:] = np.asarray([x0,y0,x1,y1])
self.templates_array = templates_array
    def load_images_stacked_old(self, fid, verbose=False):  # legacy variant; expects an older image_path_format with three placeholders (cam, part, index)
im_out = []
for cam in self.cameras_list:
for part in range(self.n_parts):
if verbose:
print "Loading " + self.image_path_format%(cam,part,self.img_index_list[fid])
im = cv2.imread(self.image_path_format%(cam,part,self.img_index_list[fid]))
im_out.append(im[:,:,0]>0)
image = np.asarray(np.stack(im_out))
return image
def load_images_stacked(self,fid,verbose = False):#load results of part
im_out = []
for cam in self.cameras_list:
if verbose:
print "Loading " + self.image_path_format%(cam,self.img_index_list[fid])
im = np.load(self.image_path_format%(cam,self.img_index_list[fid]))
im_out.append(im)
image = np.asarray(np.concatenate(im_out,axis = 2)).transpose((2,0,1))
return image
def get_indices_above(self,image,threshold = 0.6):
n_vars = self.templates_array.shape[1]
img_fg = image[self.n_parts-1::self.n_parts]
templates_fg = self.templates_array[self.n_parts-1::self.n_parts]
aux = np.cumsum(img_fg,axis = 1)
integral_img = np.cumsum(aux,axis = 2)
scores = np.zeros(n_vars)
sizes = np.zeros(n_vars)
for cam in range(templates_fg.shape[0]):
scores += (integral_img[cam,templates_fg[cam,:,0],templates_fg[cam,:,1]] + integral_img[cam,templates_fg[cam,:,2],templates_fg[cam,:,3]] - integral_img[cam,templates_fg[cam,:,0],templates_fg[cam,:,3]] - integral_img[cam,templates_fg[cam,:,2],templates_fg[cam,:,1]])
sizes += np.maximum((templates_fg[cam,:,2]-templates_fg[cam,:,0])*(templates_fg[cam,:,3]-templates_fg[cam,:,1]),4.0)
scores = scores / sizes
return np.where(scores > threshold)[0],scores
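    def _integral_box_sum_demo(self):
        '''
        Illustrative sketch (editor's addition, not part of the original
        pipeline): checks the four-corner integral-image identity that
        get_indices_above uses to sum foreground pixels inside each box.
        '''
        img = np.random.rand(6, 8)
        ii = np.cumsum(np.cumsum(img, axis=0), axis=1)
        x0, y0, x1, y1 = 1, 2, 4, 6
        box = ii[x0, y0] + ii[x1, y1] - ii[x0, y1] - ii[x1, y0]
        assert abs(box - img[x0 + 1:x1 + 1, y0 + 1:y1 + 1].sum()) < 1e-9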
def plot_output(self,Q_out,fid,cam,part,thresh = 0.9,iteration = -1,Shift = []):
image =self.load_images_stacked(fid)
img_cam = image[self.n_parts*cam+part]
Q_plot = Q_out[iteration]
if len(Shift) == 0:
templates_cam = self.templates_array[self.n_parts*cam+part]
else:
templates_cam = self.templates_array[self.n_parts*cam+part] + Shift[iteration][self.n_parts*cam+part]
H,W = image.shape[1:]
img_out = np.zeros((H,W,3))
img_out[:,:,0] = img_cam
Q_abs = np.ones((H,W))
for i in range(templates_cam.shape[0]):
if Q_plot[i] > 0.001:
Q_abs[templates_cam[i,0]:templates_cam[i,2],templates_cam[i,1]:templates_cam[i,3]] *= 1-Q_plot[i]
img_out[:,:,2] = 1-Q_abs
if Q_plot[i] > thresh:
cv2.rectangle(img_out,(templates_cam[i,1],templates_cam[i,0]),(templates_cam[i,3],templates_cam[i,2]),(0,1,0))
img_out[:,:,2] = 1-Q_abs
plt.imshow(img_out)
plt.show()
def save_dat(self,Q_out,fid,folder_out,iteration = -1,verbose = False):
out_path= folder_out + '%08d.dat'%self.img_index_list[fid]
if not os.path.exists(folder_out):
os.makedirs(folder_out)
Q_save = Q_out[iteration]
f = open(out_path,'w')
for i in range(Q_save.shape[0]):
string = '%d %f\n'%(i,Q_save[i])
f.write(string)
f.close()
if verbose:
print "Saved file :", out_path
def save_dat_withpath(self,Q_out,out_path,iteration = -1):
Q_save = Q_out[iteration]
f = open(out_path,'w')
for i in range(Q_save.shape[0]):
string = '%d %f\n'%(i,Q_save[i])
f.write(string)
f.close()
def get_coordinates_from_Q(self,Q_det,q_thresh = 0.5):
det_ID = np.asarray(np.where(Q_det>q_thresh))[0]
det_coordinates = np.float32(np.stack([det_ID/self.W_grid,det_ID%self.W_grid]).T)
return det_coordinates
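    # Editor's note: det_ID is a flat index into the H_grid x W_grid ground
    # plane, so integer division and modulo by W_grid (i.e. divmod) recover
    # the (row, col) coordinates here and in the reduced variant below.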
def get_coordinates_from_Q_reduced(self,Q_det,indices_reduced,q_thresh = 0.5):
det_ID_reduced = np.asarray(np.where(Q_det>q_thresh))[0]
det_ID = indices_reduced[det_ID_reduced]
det_coordinates = np.float32(np.stack([det_ID/self.W_grid,det_ID%self.W_grid]).T)
return det_coordinates
def show_detection_MAP(self,X_coordinates,X_map = np.zeros((1,1))):
'''
Allows to compare two maps:
X_coordinates will be in yellow and X_map in blue
'''
n_x = X_coordinates.shape[0]
if np.sum(X_map) ==0:
add =1
X_map = np.zeros((self.H_grid,self.W_grid))
else:
add =2
for i_x in range(n_x):
X_map[int(X_coordinates[i_x,0]) -2:int(X_coordinates[i_x,0]) +2,int(X_coordinates[i_x,1]) - 2 : int(X_coordinates[i_x,1]) + 2] += add
        print 'Inverting map left-right before display'
plt.imshow(X_map[:,::-1],interpolation='nearest')
plt.show()
return X_map
def show_heatmap(self,Q):
plt.imshow(np.log(Q.reshape(self.H_grid,self.W_grid)))
plt.colorbar()
plt.show()
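# Illustrative usage sketch (editor's addition, not in the original file);
# it assumes a valid Config module and precomputed part images on disk:
#
#     room = POM_room(with_templates=True)
#     image = room.load_images_stacked(fid=0)
#     indices, scores = room.get_indices_above(image, threshold=0.6)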
|
pierrebaque/DeepOcclusion
|
pom_room.py
|
Python
|
gpl-3.0
| 10,978
|
[
"Gaussian"
] |
fcb78ab3f36f3886737473355f81150d4294be5554cd1a9157baf84d20fb4a73
|
"""
==========================
FastICA on 2D point clouds
==========================
Illustrate visually the results of :ref:`ICA` vs :ref:`PCA` in the
feature space.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, two Student's t variables with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by green vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print __doc__
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD
import numpy as np
import pylab as pl
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
S = np.random.standard_t(1.5, size=(10000, 2))
S[:, 0] *= 2.  # stretch one source so the two components are distinguishable
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA()
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
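# Illustrative check (editor's addition, not in the original example): PCA
# scores are uncorrelated, so rescaling each component to unit standard
# deviation whitens the data -- its covariance is close to the identity,
# which is why ICA then only has to find a rotation in this space.
S_white = S_pca_ / S_pca_.std(axis=0)
assert np.allclose(np.cov(S_white.T), np.eye(2), atol=1e-1)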
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
pl.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10)
if axis_list is not None:
colors = [(0, 0.6, 0), (0.6, 0, 0)]
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
pl.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
# pl.quiver(x_axis, y_axis, x_axis, y_axis, zorder=11, width=0.01,
pl.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01,
scale=6, color=color)
pl.hlines(0, -3, 3)
pl.vlines(0, -3, 3)
pl.xlim(-3, 3)
pl.ylim(-3, 3)
pl.xlabel('x')
pl.ylabel('y')
pl.subplot(2, 2, 1)
plot_samples(S / S.std())
pl.title('True Independent Sources')
axis_list = [pca.components_.T, ica.get_mixing_matrix()]
pl.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
pl.legend(['PCA', 'ICA'], loc='upper left')
pl.title('Observations')
pl.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
pl.title('PCA scores')
pl.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
pl.title('ICA estimated sources')
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
pl.show()
|
cdegroc/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
Python
|
bsd-3-clause
| 3,177
|
[
"Gaussian"
] |
f05d6bd1a30e4e8bcb1bc26d1fa81d1606a87449f221b004353d80e1214d196c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run the script for details of the licence
# or refer to the notice section later in the file.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#!<^DATA
#!<^CONFIG
cfgColor = 0
cfgAutoSave = True
cfgReviewMode = True
cfgSysCalls = False
cfgEditorNt = "edit"
cfgEditorPosix = "nano,pico,vim,emacs"
cfgShortcuts = ['', '', '', '', '', '', '', '', '', '']
cfgAbbreviations = {}
cfgPAbbreviations = {}
#!<^CODE
import sys
import os
import re
from datetime import date
from datetime import timedelta
import platform
import urllib
import webbrowser  # needed by the WEB command below
import getpass
from md5 import md5
import struct
import tempfile
from threading import Timer
import stat
supportAes = True
try:
import pyRijndael
except:
supportAes = False
try:
import readline
except:
pass
usePlugin = True
try:
import ikogPlugin
except:
usePlugin = False
notice = [
"ikog.py v 1.90 2008-11-14",
"Copyright (C) 2006-2008 S. J. Butler",
"Visit http://www.henspace.co.uk for more information.",
"This program is free software; you can redistribute it and/or modify",
"it under the terms of the GNU General Public Licence as published by",
"the Free Software Foundation; either version 2 of the License, or",
"(at your option) any later version.",
"",
"This program is distributed in the hope that it will be useful,",
"but WITHOUT ANY WARRANTY; without even the implied warranty of",
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the",
"GNU General Public License for more details. The license is available",
"from http://www.gnu.org/licenses/gpl.txt"
]
banner = [
" _ _ ",
" (_) | | __ ___ __ _",
" | | | |/ / / _ \ / _` |",
" | | | < | (_) | | (_| |",
" |_| |_|\_\ \___/ \__, |",
" _ _ _ |___/",
" (_) | |_ | | __ ___ ___ _ __ ___ ___ _ __",
" | | | __| | |/ / / _ \ / _ \ | '_ \ / __| / _ \ | '_ \ ",
" | | | |_ | < | __/ | __/ | |_) | \__ \ | (_) | | | | |",
" |_| \__| |_|\_\ \___| \___| | .__/ |___/ \___/ |_| |_|",
" |_| _",
" __ _ _ __ ___ __ __ (_) _ __ __ _ ",
" / _` | | '__| / _ \ \ \ /\ / / | | | '_ \ / _` |",
" | (_| | | | | (_) | \ V V / | | | | | | | (_| | _",
" \__, | |_| \___/ \_/\_/ |_| |_| |_| \__, | (_)",
" |___/ |___/",
]
magicTag = "#!<^"
gMaxLen = 80
try:
ruler = "~".ljust(gMaxLen - 1, "~")
divider = "_".ljust(gMaxLen - 1, "_")
except Exception:
print "Error found. Probably wrong version of Python"
gReqPythonMajor = 2
gReqPythonMinor = 4
def safeRawInput(prompt):
try:
entry = raw_input(prompt)
except:
print "\n"
entry = ""
return entry
### global compare function
def compareTodo(a, b):
return cmp(a.getEffectivePriority(), b.getEffectivePriority())
def printError(msg):
print gColor.code("error") + "ERROR: " + msg + gColor.code("normal")
def clearScreen(useSys = False):
if useSys:
if os.name == "posix":
os.system("clear")
elif os.name in ("dos", "ce", "nt"):
os.system("cls")
print "\n"*25
for l in banner:
print l
### XTEA algorithm public domain
class Xtea:
def __init__(self):
pass
def crypt(self, key,data,iv='\00\00\00\00\00\00\00\00',n=32):
def keygen(key,iv,n):
while True:
iv = self.xtea_encrypt(key,iv,n)
for k in iv:
yield ord(k)
xor = [ chr(x^y) for (x,y) in zip(map(ord,data),keygen(key,iv,n)) ]
return "".join(xor)
def xtea_encrypt(self, key,block,n=32):
v0,v1 = struct.unpack("!2L",block)
k = struct.unpack("!4L",key)
sum,delta,mask = 0L,0x9e3779b9L,0xffffffffL
for round in range(n):
v0 = (v0 + (((v1<<4 ^ v1>>5) + v1) ^ (sum + k[sum & 3]))) & mask
sum = (sum + delta) & mask
v1 = (v1 + (((v0<<4 ^ v0>>5) + v0) ^ (sum + k[sum>>11 & 3]))) & mask
return struct.pack("!2L",v0,v1)
class WordWrapper:
def __init__(self, width):
self.width = width
self.nLines = 0
self.pos = 0
def addLine(self, pos):
self.pos = pos
self.nLines = self.nLines + 1
def getNLines(self):
return self.nLines
def intelliLen(self, text):
return len(gColor.stripCodes(text))
def wrap(self, text):
self.nLines = 0
formatted = text.replace("<br>", "\n").replace("<BR>", "\n")
lines = formatted.splitlines()
out = ""
self.pos = 0
for thisline in lines:
newline = True
words = thisline.split()
if self.pos != 0:
out = out + "\n"
self.addLine(0)
for w in words:
wlen = self.intelliLen(w) + 1
if (self.pos + wlen) == self.width:
out = out + " " + w
self.addLine(0)
elif (self.pos + wlen) < self.width:
if newline:
out = out + w
self.pos = wlen
else:
out = out + " " + w
self.pos = self.pos + wlen + 1
else:
out = out + "\n" + w
self.addLine(wlen)
newline = False
return out
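# Illustrative sketch (editor's addition, not in the original ikog code):
# WordWrapper honours embedded <br> markers and, via intelliLen, ignores
# ANSI colour codes when measuring line width.
def wordWrapSketch():
    ww = WordWrapper(20)
    print(ww.wrap("first part<br>a longer second line that will wrap"))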
### Color code class for handling color text output
class ColorCoder:
NONE = -1
ANSI = 0
codes = [{"normal":"\x1b[0;37;40m",
"title":"\x1b[1;32;40m",
"heading":"\x1b[1;35;40m",
"bold":"\x1b[1;35;40m",
"important":"\x1b[1;31;40m",
"error":"\x1b[1;31;40m",
"reverse":"\x1b[0;7m",
"row0":"\x1b[0;35;40m",
"row1":"\x1b[0;36;40m"},
{"normal":"\x1b[0;37m",
"title":"\x1b[1;32m",
"heading":"\x1b[1;35m",
"bold":"\x1b[1;35m",
"important":"\x1b[1;31m",
"error":"\x1b[1;31m",
"reverse":"\x1b[0;7m",
"row0":"\x1b[0;35m",
"row1":"\x1b[0;36m"}]
def __init__(self, set):
self.codeSet = self.NONE
self.setCodeSet(set)
def stripCodes(self, text):
# strip out the ansi codes
ex = re.compile("\x1b\[[0-9;]*m")
return ex.sub("", text)
def setCodeSet(self, set):
old = self.codeSet
if set < 0:
self.codeSet = self.NONE
elif set < len(self.codes):
self.codeSet = set
return (old != self.codeSet)
def isValidSet(self, myset):
if myset < len(self.codes):
return True
else:
return False
def colorSupported(self):
return (os.name == "posix" or os.name == "mac")
def usingColor(self):
        return (self.codeSet != self.NONE and self.colorSupported())
def code(self, type):
if self.codeSet == self.NONE or not self.colorSupported():
return ""
else:
return self.codes[self.codeSet][type]
def printCode(self, type):
if self.codeSet != self.NONE:
print self.code(type),
def getCodeSet(self):
return self.codeSet
### Viewer class for paging through multiple lines
class ListViewer:
def __init__(self, maxlines):
self.maxlines = maxlines
def show(self, list, pause):
count = 0
for line in list:
if count >= self.maxlines or line == pause:
io = safeRawInput("--- Press enter for more. Enter s to skip ---").strip()
print ""
if len(io) > 0 and io.upper()[0] == "S":
break
count = 0
if line != pause:
print line
count = count + 1
### Handler for encryption
class Encryptor:
TYPE_OBSCURED = "xtea_"
TYPE_AES = "aes_"
SALT_64 = "1hJ8*gpQ"
def __init__(self):
self.key = ""
self.encryptionType = self.TYPE_OBSCURED
def setType(self, codeType):
if codeType == self.TYPE_AES and supportAes == False:
self.encryptionType = self.TYPE_OBSCURED
else:
self.encryptionType = codeType
return self.encryptionType
def setKey(self, key):
self.key = key
def getKey(self):
return self.key
def enterKey(self, prompt1, prompt2):
done = False
while not done:
input1 = getpass.getpass(prompt1 + " >>>")
if prompt2 != "":
input2 = getpass.getpass(prompt2 + " >>>")
if input1 != input2:
print "You must enter the same password. Start again"
else:
done = True
else:
done = True
self.key = input1
return input1
def complexKey(self):
return md5(self.key).digest()
def getSecurityClass(self, encrypted):
if encrypted.startswith(self.TYPE_OBSCURED):
return "private xtea"
if encrypted.startswith(self.TYPE_AES):
return "secret aes"
return "unknown"
def obscure(self, plainText):
key = self.complexKey()
obscured = Xtea().crypt(key, plainText, self.SALT_64)
return self.TYPE_OBSCURED + obscured.encode('hex_codec')
def unobscure(self, obscured):
plain = ""
data = obscured[len(self.TYPE_OBSCURED):]
data = data.decode('hex_codec')
key = self.complexKey()
plain = Xtea().crypt(key, data, self.SALT_64)
return plain
def encryptAes(self, plainText):
if len(self.key) < 16:
key = self.complexKey()
else:
key = self.key
obscured = pyRijndael.EncryptData(key, plainText)
return self.TYPE_AES + obscured.encode('hex_codec')
def decryptAes(self, encrypted):
plain = ""
data = encrypted[len(self.TYPE_AES):]
data = data.decode('hex_codec')
if len(self.key) < 16:
key = self.complexKey()
else:
key = self.key
plain = pyRijndael.DecryptData(key, data)
return plain
def enterKeyAndEncrypt(self, plainText):
self.enterKey("Enter the master password.", "Re-enter the master password")
return self.encrypt(plainText)
def encrypt(self, plainText):
if self.encryptionType == self.TYPE_AES:
return self.encryptAes(plainText)
else:
return self.obscure(plainText)
def enterKeyAndDecrypt(self, encryptedText):
self.enterKey("Enter your master password", "")
return self.decrypt(encryptedText)
def decrypt(self, encryptedText):
if encryptedText.startswith(self.TYPE_AES):
if not supportAes:
return "You do not have the pyRinjdael module so the text cannot be decrypted."
else:
return self.decryptAes(encryptedText)
else:
return self.unobscure(encryptedText)
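# Illustrative sketch (editor's addition, not in the original ikog code):
# text obscured with the built-in XTEA path is hex-encoded behind an
# "xtea_" prefix and round-trips under the same master password.
def encryptorRoundTripSketch():
    e = Encryptor()
    e.setKey("example password")  # hypothetical key
    token = e.obscure("plain text")
    assert token.startswith(Encryptor.TYPE_OBSCURED)
    assert e.unobscure(token) == "plain text"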
### Handler for user input
class InputParser:
def __init__(self, prompt):
self.prompt = prompt
def read(self, entry = ""):
if entry == "":
entry = safeRawInput(self.prompt)
entry = entry.strip()
if entry == "":
command = ""
line = ""
else:
if usePlugin:
entry = ikogPlugin.modifyUserInput(entry)
if entry.find(magicTag) == 0:
printError("You cannot begin lines with the sequence " + magicTag)
command = ""
line = ""
elif entry.find(TodoItem.ENCRYPTION_MARKER) >= 0:
printError ("You cannot use the special sequence " + TodoItem.ENCRYPTION_MARKER)
command = ""
line = ""
else:
n = entry.find(" ")
if n >= 0:
command = entry[:n]
line = entry[n + 1:]
else:
command = entry
line = ""
return (command, line)
class EditorLauncher:
WARNING_TEXT = "# Do not enter secret or private information!"
def __init__(self):
pass
def edit(self, text):
ed = ""
terminator = "\n"
if os.name == "posix":
ed = cfgEditorPosix
elif os.name == "nt":
ed = cfgEditorNt
terminator = "\r\n"
if ed == "":
printError("Sorry, but external editing not supported on " + os.name.upper())
success = False
else:
fname = self.makeFile(text, terminator)
if fname == "":
printError("Unable to create temporary file.")
else:
success = self.run(ed, fname)
if success:
(success, text) = self.readFile(fname)
if text == self.orgText:
print("No changes made.");
success = False
self.scrubFile(fname)
if success:
return text
else:
return ""
def scrubFile(self, fname):
try:
os.remove(fname)
except Exception, e:
printError("Failed to remove file " + fname + ". If you entered any private data you should delete this file yourself.")
def readFile(self, fname):
success = False
try:
fh = open(fname, "rt")
line = fh.readline()
text = ""
first = True
while line != "":
thisLine = self.safeString(line)
if thisLine != self.WARNING_TEXT:
if not first:
text = text + "<br>"
text = text + thisLine
first = False
line = fh.readline()
fh.close()
success = True
except Exception, e:
printError("Error reading the edited text. " + str(e))
return (success, text)
def safeString(self, text):
return text.replace("\r","").replace("\n","")
def makeFile(self, text, terminator):
fname = ""
(fh, fname) = tempfile.mkstemp(".tmpikog","ikog")
fout = os.fdopen(fh,"wt")
text = text.replace("<BR>", "<br>")
self.orgText = text
lines = text.split("<br>")
fout.write(self.WARNING_TEXT + terminator)
for thisline in lines:
fout.write(self.safeString(thisline) + terminator)
fout.close()
return fname
def run(self, program, file):
progs = program.split(",")
for prog in progs:
success = self.runProgram(prog.strip(), file)
if success:
                break
return success
def runProgram(self, program, file):
success = False
if os.name == "posix":
try:
progarg = program
os.spawnlp(os.P_WAIT, program, progarg, file)
success = True
except os.error:
pass
except Exception, e:
printError(str(e))
elif os.name == "nt":
if file.find(" ") >= 0:
file = "\"" + file + "\""
for path in os.environ["PATH"].split(os.pathsep):
try:
prog = os.path.join(path, program)
if prog.find(" ") >= 0:
progarg = "\"" + prog + "\""
else:
progarg = prog
os.spawnl(os.P_WAIT, prog, progarg, file)
success = True
if success:
break
except os.error:
pass
except Exception, e:
printError(str(e))
return success
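# Illustrative sketch (editor's addition, not in the original ikog code): a
# typical EditorLauncher round trip -- the text goes to a temp file with a
# warning header, the configured editor is spawned, and the edited text is
# read back ("" means unchanged or failed). Requires SYS ON in practice.
#
#     edited = EditorLauncher().edit("my task <br> second line")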
### The main todo list
class TodoList:
quickCard = ["Quick reference card:",
"? ADD/A/+ text FILTER/FI [filter]",
"HELP/H IMMEDIATE/I/++ text TOP/T [N]",
"COLOR/COLOUR/C [N] KILL/K/X/- N NEXT/N",
"MONOCHROME/MONO CLEAR PREV/P",
"EXPORT REP/R N [text] GO/G N",
"IMPORT file MOD/M N [text] LIST/L [filter]",
"REVIEW/REV ON/OFF EXTEND/E N [text] LIST>/L> [filter]",
"V0 EDIT/ED [N] @",
"V1 SUB/SU N /s1/s2/ :D",
"WEB FIRST/F N :P>",
"SAVE/S DOWN/D N @>",
"AUTOSAVE/AS ON|OFF UP/U N :D>",
"VER/VERSION NOTE/NOTES text :P>",
"CLEARSCREEN/CLS O/OPEN file SHOW N",
"SYS ON|OFF NEW file SETEDxx editor",
"!CMD command 2 ABBREV/AB @x @full",
"ABBREV/AB ? PAB ? PAB :px :pfull",
"SHORTCUT/SC N cmd SHORTCUT/SC ? =N",
"ARCHIVE/DONE N [text]",
]
help = [ "",
"Introduction",
"------------",
"The program is designed to help manage tasks using techniques",
"such as Getting Things Done by David Allen. Check out",
"http://www.henspace.co.uk for more information and detailed help.",
"To use the program, simply enter the task at the prompt.",
"All of the commands are displayed in the next section.",
"!PAUSE!",
"COMMANDS",
"--------",
"Commands that have more than one method of entry are shown separated by /",
"e.g HELP/H means that you can enter either HELP or an H.",
"All commands can be entered in upper or lower case.",
"Items shown in square brackets are optional.",
"Items shown separated by the | symbol are alternatives. e.g ON|OFF means",
"you should type either ON or OFF.",
"Note that some commands refer to adding tasks to the top or bottom of the",
"list. However the task's position in the list is also determined by its.",
"priority. So, for example, adding a task to the top will still not allow",
"it to precede tasks that have been assigned a higher priority number. ",
"!PAUSE!",
"GENERAL COMMANDS",
"----------------",
"? : displays a quick reference card",
"HELP/H : displays this help.",
"VERSION/VER : display the version.",
"WEB : Go to the website for more information",
"CLEARSCREEN/CLS : Clear the screen",
"COLOR/COLOUR/C [N] : Use colour display (not Windows) N=1 for no background",
"MONOCHROME/MONO : Use monochrome display",
"EXPORT : Export the tasks only to filename.tasks.txt",
"IMPORT file : Import tasks from the file",
"REVIEW/REV ON|OFF : If on, hitting enter moves to the next task",
" : If off, enter re-displays the current task",
"V0 : Same as REVIEW OFF",
"V1 : Same as REVIEW ON",
"SAVE/S : Save the tasks",
"O/OPEN file : Open a new data file.",
"NEW file : Create a new data file.",
"AUTOSAVE/AS ON|OFF : Switch autosave on or off",
"SYS ON|OFF : Allow the program to use system calls.",
"!CMD command : Run a system command.",
"2 : Start a two minute timer (for GTD)",
"QUIT/Q : quit the program",
"!PAUSE!",
"TASK ENTRY AND EDITING COMMANDS",
"-------------------------------",
"For the editing commands that require a task number, you can",
"replace N by '^' or 'this' to refer to the current task.",
"ADD/A/+ the task : add a task to the bottom of the list.",
" : Entering any line that does not begin with",
" : a valid command and which is greater than 10",
" : characters long is also assumed to be an addition.",
"EDIT/ED [N] : Create task, or edit task N, using external editor.",
"SUB/SU N /s1/s2/ : Replace text s1 with s2 in task N. Use \/ if you",
" : need to include the / character.",
"NOTE/NOTES text : shorthand for ADD #0 @Notes text",
"IMMEDIATE/I/++ : add a task to the top of the list to do today.",
"REP/R N [text] : replace task N",
"MOD/M N [text] : modify task N.",
"EXTEND/E N [text] : add more text to task N",
"FIRST/F N : move task N to the top.",
"DOWN/D/ N : move task N down the queue",
"UP/U/ N : move task N up the queue",
"!PAUSE!",
"TASK REMOVAL COMMANDS",
"---------------------",
"KILL/K/X/- N : kill (delete) task N. You must define N",
"DONE N [text] : Remove task N and move to an archive file",
"ARCHIVE N [text] : Same as DONE",
"CLEAR : Remove all tasks",
"!PAUSE!",
"DISPLAY COMMANDS",
"----------------",
"SHOW N : display encrypted text for task N",
"FILTER/FI [filter] : set a filter. Applies to all displays",
" : See list for details of the filter",
" : Setting the filter to nothing clears it.",
"TOP/T [N] : Go to top, list N tasks, and display the top task",
"NEXT/N : display the next task. Same as just hitting enter",
"PREV/P : display previous task",
"GO/G N : display task N",
"LIST/L [filter] : list tasks. Filter = context, project, priority, date",
" : or word. Contexts begin with @ and projects with :p",
" : Dates begin with :d, anything else is a search word.",
" : Precede term with - to exclude e.g. -@Computer",
" : e.g LIST @computer or LIST #5",
"@ : sorted list by Context.",
":D : sorted list by Dates",
":P : sorted list by Projects",
"LIST>/L> [filter] : standard list sent to an HTML report",
"@> : sorted list by Context sent to an HTML report",
":D> : sorted list by Dates sent to an HTML report",
":P> : sorted list by Projects sent to an HTML report",
" : The HTML reports are sent to todoFilename.html",
"!PAUSE!",
"ADVANCED OPTIONS",
"----------------",
"The SETEDxxx commands allow you to use an external editor.",
"Note the editor you pick should be a simple text editor. If you pick",
"something that doesn't work, try the defaults again.",
"Because some systems may have different editors installed, you can set",
"more than one by separating the editors usng commas. The program will",
"use the first one it finds.",
"For Windows the default is edit, which works quite well in the terminal",
"but you could change it to notepad.",
"For Linux, the default is nano,pico,vim,emacs.",
"To use external editors you must switch on system calls using the SYS ON",
"command",
"SETEDNT command : Set the external editor for Windows (NT).",
"SETEDPOSIX command : Set the editor for posix systems.",
" : e.g. SETEDNT edit",
" : SETEDPOSIX nano,vim",
"SHORTCUT/SC ? : list shortcuts",
"SHORTCUT/SC N cmd : Set shortcut N to command cmd",
"=N : Run shortcut N",
"!PAUSE!",
"ABBREV/AB @x @full : Create new abbreviation. @x expands to @full",
"ABBREV/AB ? : List context abbreviations.",
"PAB :px :pfull : Project abbreviation. :px expands to :pfull",
"PAB ? : List project abbreviations.",
"!PAUSE!",
"ENTERING TASKS",
"--------------",
"When you enter a task, you can embed any number of contexts in the task.",
"You can also embed a project description by preceding it with :p",
"You can assign a priority by preceding a number by #. e.g. #9.",
"If you don't enter a number, a default of 5 is used. The higher the ",
"number the more important it is. Priorities range from 1 to 10.",
"Only the first # is used for the priority so you can use # as",
"a normal character as long as you precede it with a priority number.",
"You can define a date when the task must be done by preceding the date",
"with :d, i.e :dYYYY/MM/DD or :dMM/DD or :dDD. If you omit the year/month",
"they default to the current date. Adding a date automatically creates an",
"@Date context for the task.",
"So, for example, to add a new task to e-mail Joe, we could enter:",
"+ e-mail joe @computer",
"or to add a task to the decorating project, we could enter:",
"+ buy wallpaper :pdecorating",
"to enter a task with an importance of 9 we could enter:",
"+ book that holiday #9 @Internet",
"!PAUSE!",
"MODIFYING AND EXTENDING TASKS",
"-----------------------------",
"The modify command allows you to change part of an existing task.",
"So for example, imagine you have a task:",
"[05] Buy some food #9 @Internet Projects:Shopping",
"Enter the command M 5 and then type:",
"@C",
"Because the only element we have entered is a new context, only",
"that part is modified, so we get.",
"[05] Buy some food #9 @Computer Projects:Shopping",
"Likewise, had we entered:",
"Buy some tea :pEating",
"We would have got",
"[05] Buy some tea #9 @Internet Projects:Eating",
"The extend command is similar but it appends the entry. So had",
"we used the command E 5 instead of M 5 the result would have been",
"[05] Buy some food ... Buy some tea #9 @Internet Projects:Eating",
"!PAUSE!",
"CONTEXTS",
"--------",
"Any word preceded by @ will be used as a context. Contexts are like",
"sub-categories or sub-lists. There are a number of pre-defined",
"abbreviations that you can use as well. The recognised abbreviations",
"are:",
"@A = @Anywhere (this is the default)",
"@C = @Computer",
"@D = @Desk",
"@E = @Errands",
"@H = @Home",
"@I = @Internet",
"@L = @Lunch",
"@M = @Meeting",
"@N = @Next",
"@O = @Other",
"@P = @Phone",
"@PW= @Password",
"@S = @Someday/maybe",
"@W4= @Waiting_for",
"@W = @Work",
"!PAUSE!",
"ENTERING DATES",
"--------------",
"An @Date context is created if you embed a date in the task.",
"Dates are embedded using the :dDATE format.",
"Valid DATE formats are yyyy-mm-dd, mm-dd or dd",
"You can also use : or / as the separators. So, for example:",
":d2006/12/22 or :d2006-11-7 or :d9/28 are all valid entries.",
"",
"If you set a date, then until that date is reached, the task is given",
"an effective priority of 0. Once the date is reached, the task's",
"priority is increased by 11, moving it to the of the list.",
"",
"A date entry of :d0 can be used to clear a date entry.",
"A date entry of :d+X can be used to create a date entry of today + X days.",
"So :d+1 is tomorrow and :d+0 is today.",
"!PAUSE!",
"ENCRYPTING TEXT",
"---------------",
"If you want to encrypt text you can use the <private> or <secret> tags or",
"their abbreviations <p> and <s>.",
"These tags will result in all text following the tag to be encrypted.",
"Note that any special commands, @contexts for example, are treated as plain",
"text in the encrypted portion.",
"To display the text you will need to use the SHOW command.",
"",
"The <private> tag uses the inbuilt XTEA algorithm. This is supposedly a",
"relatively secure method but probably not suitable for very sensitive data.",
"",
"The <secret> tag can only be used if you have the pyRijndael.py module.",
"This uses a 256 bit Rinjdael cipher. The module can be downloaded from ",
"http://jclement.ca/software/pyrijndael/",
"You can install this in your Python path or just place it alongside your",
"ikog file.",
"Note you cannot use the extend command with encrypted text.",
"",
"!PAUSE!",
"MARKING TASKS AS COMPLETE",
"-------------------------",
"The normal way to mark a task as complete is just to remove it using the",
"KILL command. If you want to keep track of tasks you have finished, you",
"can use the ARCHIVE or DONE command. This gives the task an @Archived",
"context, changes the date to today and then moves it from the current",
"file to a file with archive.dat appended. The archive file is a valid",
"ikog file so you can use the OPEN command to view it, edit it and run",
"reports in the normal way. So assuming your current script is ikog.py,",
"to archive the current task you could enter:",
"",
"ARCHIVE ^ I have finished this",
"",
"This would move the task to a file called ikog.py.archive.dat",
"",
"!PAUSE!",
"USING EXTERNAL DATA",
"-------------------",
"Normally the tasks are embedded in the main program file so all you have",
"to carry around with you is the ikog.py file. The advantage is that you",
"only have one file to look after; the disadvantage is that every time you",
"save a task you have to save the program as well. If you want, you can",
"keep your tasks in a separate file.",
"To do this, use the EXPORT function to create a file ikog.py.tasks.txt",
"Use the CLEAR command to remove the tasks from your main ikog.py program.",
"Rename the exported file from ikog.py.tasks.txt to ikog.py.dat",
"Ikog will now use this file for storing your tasks.",
"",
"!PAUSE!",
"PASSING TASKS VIA THE COMMAND LINE",
"----------------------------------",
"It is possible to add tasks via the command line. The general format of",
"the command line is:",
" ikog.py filename commands",
"The filename is the name of the data file containing your tasks. You",
"can use . to represent the default internal tasks.",
"Commands is a set of normal ikog commands separated by the / ",
"character. Note there must be a space either side of the /.",
"So to add a task and then exit the program we could just enter:",
" ikog.py . + here is my task / QUIT",
"Note that we added the quit command to exit ikog.",
"You must make sure that you do not use any commands that require user",
"input. Deleting tasks via the command line is more complicated as you",
"need to find the task automatically. If you do try to delete this way,",
"use the filter command to find some unique text and then delete it. eg.",
" ikog.py . FI my_unique_text / KILL THIS / QUIT",
"Use THIS instead of ^ as a caret has a special meaning in Windows.",
"If you do intend automating ikog from the command line, you should add",
"a unique reference to each task so you can find it later using FILTER. eg.",
"+ this is my task ref_1256",
"!PAUSE!"]
MOVE_DOWN = 0
MOVE_UP = 1
MOVE_TOP = 2
MAX_SHORTCUTS = 10
def __init__(self, todoFile, externalDataFile):
self.setShortcuts()
self.dirty = False
self.code = []
self.todo = []
self.autoSave = True
self.review = True
self.sysCalls = False
self.currentTask = 0
self.globalFilterText = ""
self.globalFilters = []
self.localFilterText = ""
self.localFilters = []
# split the file into the source code and the todo list
self.filename = todoFile
try:
self.filename = os.readlink(todoFile)
except Exception:
pass # probably windows
externalDataFile = self.makeFilename(externalDataFile)
self.externalData = self.findDataSource(externalDataFile)
if self.externalData:
self.filename = externalDataFile
else:
self.splitFile(self.filename, False)
self.exactPriority = False
self.htmlFile = ""
def setPAbbreviation(self, line):
save = False
elements = line.split(" ", 1)
if not elements[0].lower().startswith(":p"):
self.showError("Project abbreviations must begin with :p")
elif len(elements) > 1:
if not elements[1].lower().startswith(":p"):
abb = ":p" + elements[1].title()
else:
abb = ":p" +elements[1][2:].title()
globalPAbbr.addAbbreviation(elements[0], abb)
save = True
else:
if not globalPAbbr.removeAbbreviation(elements[0]):
self.showError("Could not find project abbreviation " + line)
else:
print "Project abbreviation ", line, " removed."
save = True
return save
def showPAbbreviations(self):
print globalPAbbr.toStringVerbose()
def setAbbreviation(self, line):
save = False
elements = line.split(" ", 1)
if not elements[0].startswith("@"):
self.showError("Abbreviations must begin with @")
elif len(elements) > 1:
if not elements[1].startswith("@"):
abb = "@" + elements[1].title()
else:
abb = "@" +elements[1][1:].title()
globalAbbr.addAbbreviation(elements[0], abb)
save = True
else:
if not globalAbbr.removeAbbreviation(elements[0]):
self.showError("Could not find abbreviation " + line)
else:
print "Abbreviation ", line, " removed."
save = True
return save
def showAbbreviations(self):
print globalAbbr.toStringVerbose()
def setShortcut(self, line, force = False):
elements = line.split(" ", 1)
try:
index = int(elements[0])
if len(elements) > 1:
command = elements[1]
else:
command = ""
except Exception, e:
self.showError("Did not understand the command. Format should be SHORTCUT N my command.")
return False
if index < 0 or index > len(self.shortcuts):
self.showError("The maximum number of shortcuts is " + str(len(self.shortcuts)) + ". Shortcuts ignored.")
return False
else:
if self.shortcuts[index] != "" and not force:
if safeRawInput("Do you want to change the current command '" + self.shortcuts[index] + "'? Enter Yes to overwrite. >>>").upper() != "YES":
return False
self.shortcuts[index] = command
return True
def showShortcuts(self):
index = 0
for s in self.shortcuts:
if s == "":
msg = "unused"
else:
msg = s
print "=%1d %s" %(index, msg)
index = index + 1
def setShortcuts(self, settings = []):
if len(settings) > self.MAX_SHORTCUTS:
self.showError("The maximum number of shortcuts is " + str(self.MAX_SHORTCUTS) + ". Shortcuts ignored.")
self.shortcuts = ["" for n in range(self.MAX_SHORTCUTS)]
if len(settings) > 0:
self.shortcuts[0:len(settings)] = settings
def getShortcutIndex(self, command):
if len(command) == 2 and command[0:1].upper() == "=":
index = ord(command[1]) - ord("0")
if index >= self.MAX_SHORTCUTS:
index = -1
else:
index = -1
return index
def getShortcut(self, command):
index = self.getShortcutIndex(command)
if index >= 0:
return self.shortcuts[index]
else:
return ""
def safeSystemCall(self, line):
words = line.split()
if len(words) == 0:
self.showError("Nothing to do.")
elif words[0].upper() == "RM" or words[0].upper() == "RMDIR" or words[0].upper() == "DEL":
self.showError("Sorry, but deletion commands are not permitted.")
else:
os.system(line)
self.pause()
def processCfgLine(self, line):
params = line.split("=")
if len(params) < 2:
return
cmd = params[0].strip()
if cmd == "cfgEditorNt":
global cfgEditorNt
cfgEditorNt = params[1].replace("\"", "").strip()
elif cmd == "cfgEditorPosix":
global cfgEditorPosix
cfgEditorPosix = params[1].replace("\"", "").strip()
elif cmd == "cfgShortcuts":
elements = params[1].strip()[1:-1].split(",")
index = 0
for e in elements:
self.setShortcut(str(index) + " " + e.strip()[1:-1], True)
index = index + 1
elif cmd == "cfgAutoSave":
if params[1].upper().strip() == "TRUE":
asave = True
else:
asave = False
self.setAutoSave(asave, False)
elif cmd == "cfgReviewMode":
if params[1].upper().strip() == "TRUE":
self.setReview("ON")
else:
self.setReview("OFF")
elif cmd == "cfgSysCalls":
if params[1].upper().strip() == "TRUE":
self.setSysCalls("ON")
else:
self.setSysCalls("OFF")
elif cmd == "cfgColor":
gColor.setCodeSet(int(params[1].strip()))
elif cmd == "cfgAbbreviations":
abbrs = eval(params[1].strip())
globalAbbr.setAbbreviations(abbrs)
elif cmd == "cfgPAbbreviations":
abbrs = eval(params[1].strip())
globalPAbbr.setAbbreviations(abbrs)
else:
self.showError("Unrecognised command " + cmd)
def makeFilename(self, name):
(root, ext) = os.path.splitext(name)
if ext.upper() != ".DAT":
name = name + ".dat"
try:
name = os.path.expanduser(name)
except Exception, e:
self.showError("Failed to expand path. " + str(e))
return name
def findDataSource(self, filename):
success = False
try:
self.splitFile(filename, False)
print "Using external data file ", filename
success = True
except IOError:
print "No external data file ", filename, ", so using internal tasks."
return success
def setSysCalls(self, mode):
oldCalls = self.sysCalls
mode = mode.strip().upper()
if mode == "ON":
self.sysCalls = True
print "Using system calls for clear screen"
elif mode == "OFF":
self.sysCalls = False
print "No system calls for clear screen"
else:
self.showError("Could not understand the sys command. Use SYS ON or OFF.")
return (self.sysCalls != oldCalls)
def setAutoSave(self, asave, save):
if asave:
if self.autoSave == False:
self.autoSave = True
if save:
self.save("")
elif self.autoSave == True:
self.autoSave = False
if save:
self.save("")
if self.autoSave:
print "Autosave is on."
else:
print "Autosave is off."
def showError(self, msg):
printError(msg)
def pause(self, prompt = "Press enter to continue."):
if safeRawInput(prompt).strip() != "":
print "Entry ignored!"
def setReview(self, mode):
oldReview = self.review
mode = mode.strip().upper()
if mode == "ON":
self.review = True
print "In review mode. Enter advances to the next task"
elif mode == "OFF":
self.review = False
print "Review mode off. Enter re-displays the current task"
else:
self.showError("Could not understand the review command. Use REVIEW ON or OFF.")
return (self.review != oldReview)
def sortByPriority(self):
self.todo.sort(key=TodoItem.getEffectivePriority, reverse = True)
def run(self, commandList):
if not supportAes:
print "AES encryption not available."
print("\nEnter HELP for instructions.")
done = False
printCurrent = True
self.sortByPriority()
reopen = ""
enteredLine = ""
truncateTask = False
while not done:
self.checkCurrentTask()
if printCurrent:
self.moveToVisible()
print ruler
if truncateTask:
self.printItemTruncated(self.currentTask, "Current: ")
else:
self.printItemVerbose(self.currentTask)
print ruler
printCurrent= True
truncateTask = False
if self.dirty:
prompt = "!>>"
else:
prompt = ">>>"
if len(commandList) >= 1:
enteredLine = commandList[0]
commandList = commandList[1:]
print enteredLine
(rawcommand, line) = InputParser(prompt).read(enteredLine)
enteredLine = ""
command = rawcommand.upper()
if self.getShortcutIndex(command) >= 0:
sc = self.getShortcut(command)
if sc != "":
(rawcommand, line) = InputParser("").read(self.getShortcut(command))
else:
rawcommand = ""
line = ""
continue
print "Shortcut: ", rawcommand, " ", line
command = rawcommand.upper()
if command == "":
if self.review:
self.incTaskLoop()
elif command == "PAB":
if line.strip() == "?":
self.showPAbbreviations()
elif self.setPAbbreviation(line):
self.save("")
elif command == "ABBREV" or command == "AB":
if line.strip() == "?":
self.showAbbreviations()
elif self.setAbbreviation(line):
self.save("")
elif command == "SHORTCUT" or command == "SC":
if line.strip() == "?":
self.showShortcuts()
else:
if self.setShortcut(line):
self.save("")
printCurrent = False
elif command == "2":
enteredLine = self.runTimer(2)
printCurrent = False
elif command == "CLS" or command == "CLEARSCREEN":
clearScreen(self.sysCalls)
elif command == "SETEDNT":
global cfgEditorNt
cfgEditorNt = line
self.save("")
elif command == "SETEDPOSIX":
global cfgEditorPosix
cfgEditorPosix = line
self.save("")
elif command == "SYS":
if self.setSysCalls(line):
self.save("")
elif command == "!CMD":
if self.sysCalls:
self.safeSystemCall(line)
else:
self.showError("System calls are not allowed. Use SYS ON to enable them.")
elif command == "SHOW" or command == "SH":
self.decrypt(line)
self.pause("Press enter to clear screen and continue. ")
clearScreen(self.sysCalls)
elif command == "VERSION" or command == "VER":
print notice[0]
elif command == "SAVE" or command == "S":
if not self.dirty:
print "There's no need to save now. If the prompt shows >>> "
print "then there is nothing to save. You only need to save if the prompt "
print "shows !>>"
else:
self.forceSave("")
elif command == "NEW":
filename = self.makeFilename(line)
if self.createFile(filename):
reopen = filename
if self.dirty:
self.forceSave("")
done = True
printCurrent = False
elif command == "OPEN" or command == "O":
filename = self.makeFilename(line)
reopen = filename
if self.dirty:
self.forceSave("")
done = True
printCurrent = False
elif command == "AUTOSAVE" or command == "AS":
if line== "":
self.showError("You must enter ON or OFF for the autosave command")
else:
self.setAutoSave(line.upper() == "ON", True)
elif command == "REVIEW" or command == "REV":
if self.setReview(line):
self.save("")
elif command == "V0":
if self.setReview("OFF"):
self.save("")
elif command == "V1":
if self.setReview("ON"):
self.save("")
elif command == "?":
self.printHelp(self.quickCard)
elif command == "HELP" or command == "H":
self.printHelp(self.help)
elif command == "QUIT" or command == "Q":
if self.dirty:
self.forceSave("")
done = True
printCurrent = False
elif command == "WEB":
try:
webbrowser.open("http://www.henspace.co.uk")
except Exception, e:
self.showError("Unable to launch browser. " + str(e))
elif command == "COLOR" or command == "COLOUR" or command == "C":
try:
set = int(line, 10)
except ValueError:
set = gColor.ANSI
if not gColor.isValidSet(set):
self.showError("Invalid colour set ignored.")
elif gColor.setCodeSet(set):
self.save("")
elif command == "MONOCHROME" or command == "MONO":
if gColor.setCodeSet(gColor.NONE):
self.save("")
elif command == "EXPORT":
self.exportTasks()
elif command == "IMPORT":
if self.importTasks(line):
self.save("")
elif command == "CLEAR" and line == "":
if self.clear():
self.save("")
elif command == "FILTER" or command == "FI" or command == "=":
self.setFilterArray(False, line)
elif command == "NEXT" or command == "N":
self.incTaskLoop()
elif command == "PREV" or command == "P":
self.decTaskLoop()
elif command == "TOP" or command == "T" or command == "0":
self.currentTask = 0
if line != "":
self.setFilterArray(True, "")
self.showLocalFilter()
self.printShortList(line)
truncateTask = True
elif command == "GO" or command == "G":
self.moveTo(line)
elif command == "IMMEDIATE" or command == "I" or command == "++":
newItem = self.createItem(":d+0 " + line)
if newItem.hasHiddenTask():
clearScreen(self.sysCalls)
if newItem.hasError():
print "Errors were found:"
print newItem.getError()
print "The task was not added."
printCurrent = False
else:
self.todo.insert(0, newItem)
self.currentTask = 0
self.sortByPriority()
self.save("")
elif command == "KILL" or command == "K" or command == "-" or command == "X":
if self.removeTask(line):
self.save("")
elif command == "ARCHIVE" or command == "DONE":
if self.archiveTask(line):
self.save("")
elif command == "REP" or command =="R":
if self.modifyTask(line, TodoItem.REPLACE):
self.sortByPriority()
self.save("")
else:
printCurrent = False
elif command == "SUB" or command == "SU":
if self.substituteText(line):
self.sortByPriority()
self.save("")
elif command == "EDIT" or command == "ED":
if not self.sysCalls:
self.showError("External editing needs to use system calls. Use SYS ON to enable them.")
elif line == "":
self.addTaskExternal()
elif self.modifyTask(line, TodoItem.MODIFY, externalEditor = True):
self.sortByPriority()
self.save("")
else:
printCurrent = False
elif command == "MOD" or command == "M":
if self.modifyTask(line, TodoItem.MODIFY):
self.sortByPriority()
self.save("")
else:
printCurrent = False
elif command == "EXTEND" or command == "E":
if self.modifyTask(line, TodoItem.APPEND):
self.sortByPriority()
self.save("")
else:
printCurrent = False
elif command == "FIRST" or command == "F":
if self.moveTask(line, self.MOVE_TOP):
self.sortByPriority()
self.save("")
self.currentTask = 0
elif command == "DOWN" or command == "D":
if self.moveTask(line, self.MOVE_DOWN):
self.sortByPriority()
self.save("")
elif command == "UP" or command == "U":
if self.moveTask(line, self.MOVE_UP):
self.sortByPriority()
self.save("")
elif command == "LIST" or command == "L":
print ruler
self.setFilterArray(True, line)
self.showLocalFilter()
self.printList(False, "", "")
self.clearFilterArray(True)
print ruler
truncateTask = True
elif command == "LIST>" or command == "L>":
self.startHtml("")
self.setFilterArray(True, line)
self.showLocalFilter()
self.printList(False, "", "")
self.clearFilterArray(True)
self.endHtml()
elif command == "@":
self.listByAction()
truncateTask = True
elif command == ":P":
self.listByProject()
truncateTask = True
elif command == ":D":
self.listByDate()
truncateTask = True
elif command == "@>":
self.startHtml("Report by Context")
self.listByAction()
self.endHtml()
elif command == ":P>":
self.startHtml("Report by Project")
self.listByProject()
self.endHtml()
elif command == ":D>":
self.startHtml("Report by Date")
self.listByDate()
self.endHtml()
elif command == "ADD" or command == "A" or command == "+":
self.addTask(line)
elif command == "NOTE" or command == "NOTES":
self.addTask("#0 @Notes " + line)
elif (len(command) + len(line)) > 10:
self.addTask(rawcommand + " " + line)
elif len(command) > 0:
self.showError("Didn't understand. (Make sure you have a space after the command or your entry is longer than 10 characters)")
printCurrent = False
return reopen
def timeout(self):
self.timerActive = False
clearScreen()
print "\n\x07Timer\x07 complete.\x07\n\x07Press enter to continue.\x07"
def runTimer(self, delay):
self.timerActive = True
t = Timer(delay * 60 , self.timeout)
t.start()
s = raw_input(str(delay) + " minute timer running.\nAny entry will cancel the timer:\n>>>")
if self.timerActive:
t.cancel()
print "Timer cancelled."
elif s != "":
s = ""
print "Input discarded as timer has finished."
return s.strip()
def addTaskExternal(self):
exEdit = EditorLauncher()
entry = exEdit.edit("")
if entry != "":
self.addTask(entry)
else:
self.showError("Nothing to add")
def addTask(self, line):
newItem = self.createItem(line)
if newItem.hasError():
print "Errors were found:"
print newItem.getError()
print "The task was not added."
else:
if newItem.hasHiddenTask():
clearScreen(self.sysCalls)
self.todo.append(newItem)
self.sortByPriority()
self.save("")
def checkCurrentTask(self):
if self.currentTask > len(self.todo) - 1:
self.currentTask = len(self.todo) - 1
if self.currentTask < 0:
self.currentTask = 0
def writeArchive(self, item):
success = False
filename = self.filename + ".archive.dat"
try:
if not os.path.exists(filename):
f = open(filename,"wb")
f.write("# " + notice[0] + "\n")
f.write(magicTag + "DATA\n")
else:
f = open(filename,"a+b")
f.write(item.toString())
f.write("\n")
f.close()
print "Tasks archived to " + filename
success = True
except Exception, e:
self.showError("Error trying to archive the tasks.\n" + str(e))
return success
def exportTasks(self):
filename = self.filename + ".tasks.txt"
try:
f = open(filename,"wb")
f.write("# " + notice[0] + "\n")
f.write(magicTag + "DATA\n")
for item in self.todo:
f.write(item.toString())
f.write("\n")
f.close()
print "Tasks exported to " + filename
except Exception, e:
self.showError("Error trying to export the file.\n" + str(e))
def importTasks(self, filename):
success = False
orgNTasks = len(self.todo)
if filename == "":
self.showError("You must supply the name of the file to import.")
return success
try:
self.splitFile(filename, True)
if len(self.todo) == orgNTasks:
self.showError("Failed to find any tasks to import.")
else:
success = True
except Exception, e:
self.showError("Error importing tasks. " + str(e))
return success
def createFile(self, filename):
success = False
if os.path.exists(filename):
self.showError("Sorry but " + filename + " already exists.")
else:
try:
f = open(filename, "wb")
f.write("#!/usr/bin/env python\n")
f.write("#" + ruler + "\n")
f.close()
success = True
except Exception, e:
self.showError("Error trying to create the file " + filename + ". " + str(e))
return success
def save(self, filename):
if filename != "" or self.autoSave:
self.forceSave(filename)
else:
self.dirty = True
print "Autosave is off, so changes not saved yet."
def forceSave(self, filename):
if filename == "":
filename = self.filename
tmpFilename = filename + ".tmp"
backupFilename = filename + ".bak"
success = False
try:
f = open(tmpFilename,"wb")
f.write("#!/usr/bin/env python\n")
f.write("# -*- coding: utf-8 -*-\n")
f.write("#" + ruler + "\n")
f.write("# Run the script for details of the licence\n")
f.write("# or refer to the notice section later in the file.\n")
f.write("#" + ruler + "\n")
f.write(magicTag + "DATA\n")
for item in self.todo:
f.write(item.toString())
f.write("\n")
f.write(magicTag + "CONFIG\n")
f.write("cfgColor = " + str(gColor.getCodeSet()) + "\n")
f.write("cfgAutoSave = " + str(self.autoSave) + "\n")
f.write("cfgReviewMode = " + str(self.review) + "\n")
f.write("cfgSysCalls = " + str(self.sysCalls) + "\n")
f.write("cfgEditorNt = \"" + cfgEditorNt + "\"\n")
f.write("cfgEditorPosix = \"" + cfgEditorPosix + "\"\n")
f.write("cfgShortcuts = " + str(self.shortcuts) + "\n")
f.write("cfgAbbreviations = " +str(globalAbbr.toString()) +"\n")
f.write("cfgPAbbreviations = " +str(globalPAbbr.toString()) +"\n")
f.write(magicTag + "CODE\n")
for codeline in self.code:
f.write(codeline.rstrip())
f.write("\n")
f.close()
success = True
except Exception, e:
self.showError("Error trying to save the file.\n" + str(e))
if success:
try:
os.remove(backupFilename)
except Exception:
pass
try:
oldstat = os.stat(filename)
os.rename(filename, backupFilename)
os.rename(tmpFilename, filename)
os.chmod(filename, stat.S_IMODE(oldstat.st_mode)) # ensure permissions carried over
self.filename = filename
self.dirty = False
print "Tasks saved."
except Exception, e:
self.showError("Error trying to rename the backups.\n" + str(e))
def moveTo(self, indexStr):
try:
index = int(indexStr, 10)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr)
else:
if not self.isViewable(self.todo[index]):
print "Switching off your filter so that the task can be displayed."
self.clearFilterArray(False)
self.currentTask = index
except ValueError:
self.showError("Unable to understand the task " + indexStr + " you want to show.")
def moveToVisible(self):
start = self.currentTask
find = True
if start < 0 or start >= len(self.todo):
return
while not self.isViewable(self.todo[self.currentTask]):
self.incTaskLoop()
if self.currentTask == start:
print "Nothing matched your filter. Removing your filter so that the current task can be displayed."
self.clearFilterArray(False)
break
def decrypt(self, indexStr):
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr + " to show.")
elif self.todo[index].hasHiddenTask():
ec = Encryptor()
print WordWrapper(gMaxLen).wrap(ec.enterKeyAndDecrypt(self.todo[index].getHiddenTask()))
else:
print "Task ", index, " has no encrypted data."
def moveTask(self, indexStr, where):
success = False
if indexStr == "":
print "You must supply the number of the task to move."
return False
try:
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr + " to move.")
elif where == self.MOVE_DOWN:
if index <= len(self.todo) - 2:
item = self.todo[index]
self.todo[index] = self.todo[index + 1]
self.todo[index + 1] = item
print "Task ", index, " moved down."
success = True
else:
self.showError("Task " + str(index) + " is already at the bottom.")
else:
if index > 0:
if where == self.MOVE_TOP:
self.todo.insert(0, self.todo.pop(index))
else:
dest = index - 1
item = self.todo[dest]
self.todo[dest] = self.todo[index]
self.todo[index] = item
print "Task ", index, " moved up."
success = True
else:
self.showError("Task " + str(index) + " is already at the top.")
except ValueError:
self.showError("Unable to understand the task " + indexStr + " you want to move.")
return success
def clear(self):
cleared = False
if safeRawInput("Are you really sure you want to remove everything? Yes or No? >>>").upper() != "YES":
print("Nothing has been removed.")
else:
del self.todo[0:]
self.currentTask = 0
cleared = True
return cleared
def getRequiredTask(self, indexStr):
if indexStr == "^" or indexStr.upper() == "THIS":
index = self.currentTask
else:
try:
index = int(indexStr, 10)
except ValueError:
index = -1
return index
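    # Illustrative resolutions (assuming currentTask is 3):
    #   getRequiredTask("5")    -> 5
    #   getRequiredTask("^")    -> 3  (the current task)
    #   getRequiredTask("THIS") -> 3
    #   getRequiredTask("abc")  -> -1 (callers treat -1 as "no such task")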
def archiveTask(self, indexStr):
doit = False
line = indexStr.split(" ", 1)
if len(line) > 1:
indexStr = line[0]
entry = line[1]
else:
entry = ""
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr + " to mark as done and archive.")
else:
if indexStr == "^" or indexStr.upper() == "THIS":
doit = True
else:
print "Are you sure you want to archive: ' " + self.todo[index].toStringSimple() + "'"
if safeRawInput("Enter Yes to archive this task? >>>").upper() == "YES":
doit = True
if doit:
newItem = self.createItem(":d+0")
self.todo[index].copy(newItem, TodoItem.MODIFY)
newItem = self.createItem(entry + " @Archived")
self.todo[index].copy(newItem, TodoItem.APPEND)
if self.writeArchive(self.todo[index]):
self.todo[index:index + 1] = []
print "Task ", index, " has been archived."
else:
doit = False
print "Task ", index, " marked as archived but not removed."
else:
print "Task ", index, " has not been archived."
return doit
def removeTask(self, indexStr):
doit = False
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr + " to delete.")
else:
if indexStr == "^" or indexStr.upper() == "THIS":
doit = True
else:
print "Are you sure you want to remove ' " + self.todo[index].toStringSimple() + "'"
if safeRawInput("Enter Yes to delete this task? >>>").upper() == "YES":
doit = True
if doit:
self.todo[index:index + 1] = []
print "Task ", index, " has been removed."
else:
print "Task ", index, " has not been removed."
return doit
def substituteText(self, indexStr):
line = indexStr.split(" ", 1)
if len(line) > 1:
indexStr = line[0]
entry = line[1]
else:
self.showError("You need to define the task and substitution phrases. e.g SUB 0 /old/new/")
return False
success = False
if indexStr == "":
print "You must supply the number of the task to change."
return False
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr)
else:
text = entry.replace("/", "\n")
text = text.replace("\\\n","/")
phrases = text.split("\n")
if len(phrases) != 4:
self.showError("The format of the command is incorrect. The substitution phrases should be /s1/s2/ ")
return False
oldText = self.todo[index].getTask()
newText = oldText.replace(phrases[1], phrases[2])
if newText == oldText:
self.showError("Nothing has changed.")
return False
newItem = self.createItem(newText)
if newItem.hasError():
print "With the substitution the task had errors:"
print newItem.getError()
print "Task ", index, " is unchanged."
else:
if newItem.hasHiddenTask():
clearScreen(self.sysCalls)
self.showError("It isn't possible to create private or secret data by using the substitition command.")
else:
self.todo[index].copy(newItem, TodoItem.MODIFY)
print "Task ", index, " has been changed."
success = True
return success
def modifyTask(self, indexStr, replace, externalEditor = False):
line = indexStr.split(" ", 1)
if len(line) > 1:
indexStr = line[0]
entry = line[1]
else:
entry = ""
success = False
if indexStr == "":
print "You must supply the number of the task to change."
            return False
index = self.getRequiredTask(indexStr)
if index < 0 or index > len(self.todo) - 1:
self.showError("Sorry but there is no task " + indexStr)
else:
if entry == "":
if externalEditor:
exEdit = EditorLauncher()
(key, entry) = self.todo[index].toStringEditable()
entry = exEdit.edit(entry)
else:
if replace == TodoItem.REPLACE:
print "This task will completely replace the existing entry,"
print "including any projects and actions."
elif replace == TodoItem.MODIFY:
print "Only the elements you add will be replaced. So, for example,"
print "if you don't enter any projects the original projects will remain."
else:
print "Elements you enter will be appended to the current task"
entry = safeRawInput("Enter new details >>>")
if entry != "":
if replace == TodoItem.APPEND:
newItem = self.createItem(entry, password="unused") # we will discard the encrypted part on extend
elif externalEditor:
newItem = self.createItem(entry, password = key)
else:
newItem = self.createItem(entry)
if newItem.hasHiddenTask():
clearScreen(self.sysCalls)
if newItem.hasError():
print "The task had errors:"
print newItem.getError()
print "Task ", index, " is unchanged."
else:
if newItem.hasHiddenTask() and replace == TodoItem.APPEND:
self.showError("It isn't possible to extend the encrypted part of a task.\nThis part is ignored.")
self.todo[index].copy(newItem, replace)
print "Task ", index, " has been changed."
success = True
else:
print "Task ", index, " has not been touched."
return success
def incTask(self):
if self.currentTask < len(self.todo) - 1:
self.currentTask = self.currentTask + 1
def incTaskLoop(self):
if self.currentTask < len(self.todo) - 1:
self.currentTask = self.currentTask + 1
else:
self.currentTask = 0
def decTask(self):
if self.currentTask > 0:
self.currentTask = self.currentTask - 1
def decTaskLoop(self):
if self.currentTask > 0:
self.currentTask = self.currentTask - 1
else:
self.currentTask = len(self.todo) - 1
def printItemTruncated(self, index, leader):
if len(self.todo) < 1:
print leader, "no tasks"
else:
scrnline = leader + "[%02d] %s" % (index, self.todo[index].toStringSimple())
if usePlugin:
print ikogPlugin.modifyShortOutput(scrnline)
elif len(scrnline) > gMaxLen:
print scrnline[0:gMaxLen - 3] + "..."
else:
print scrnline
def printItem(self, index, colorType):
if len(self.todo) < 1:
self.output("There are no tasks to be done.\n", 0)
nlines = 1
else:
wrapper = WordWrapper(gMaxLen)
scrnline = wrapper.wrap("[%02d] %s" % (index, self.todo[index].toStringSimple()))
if colorType == "row0":
style = "class=\"evenTask\""
else:
style = "class=\"oddTask\""
self.output("<div %s>[%02d] %s</div>\n" % (style, index, self.todo[index].toStringSimple()),
gColor.code(colorType) + scrnline + gColor.code("normal") + "\n" )
nlines = wrapper.getNLines()
return nlines
def printItemVerbose(self, index):
if len(self.todo) < 1:
print "There are no tasks to be done."
else:
self.showFilter()
wrapper = WordWrapper(gMaxLen)
scrnline = wrapper.wrap("[%02d] %s" % (index, self.todo[index].toStringVerbose()))
if usePlugin:
print ikogPlugin.modifyVerboseOutput(scrnline)
else:
print scrnline
def clearFilterArray(self, local):
if local:
self.localFilters = []
self.localFilterText = ""
else:
self.globalFilters = []
self.globalFilterText = ""
def setFilterArray(self, local, requiredFilter):
filters = requiredFilter.split()
if local:
destination = self.localFilters
else:
destination = self.globalFilters
destination[:] = []
humanVersion = ""
for word in filters:
if word[0:1] == "-":
invert = True
word = word[1:]
else:
invert = False
if word[0:2].upper() == ":D" and len(word) > 2:
filter = ":D" + TodoItem("").parseDate(word[2:].strip(), False)
elif word[0:2].lower() == ":p":
filter = globalPAbbr.expandProject(word)
else:
filter = globalAbbr.expandAction(word)
if invert:
filter = "-" + filter
destination.append(filter)
if humanVersion != "":
humanVersion = humanVersion + " " + filter
else:
humanVersion = filter
if local:
for filter in self.globalFilters:
destination.append(filter)
if humanVersion != "":
humanVersion = humanVersion + " " + filter
else:
humanVersion = filter
if local:
self.localFilterText = humanVersion
else:
self.globalFilterText = humanVersion
def isViewable(self, item):
if len(self.globalFilters) == 0 and len(self.localFilters) == 0:
return True
overallView = True
ored = False
if len(self.localFilters) > 0:
filterArray = self.localFilters
else:
filterArray = self.globalFilters
if "or" in filterArray or "OR" in filterArray:
fast = False
else:
fast = True
for filter in filterArray:
if filter.upper() == "OR":
ored = True
continue
view = False
usePriority = False
mustHave = False
if filter[0:1] == "+":
filter = filter[1:]
mustHave = True
if filter[0:1] == "-":
invert = True
filter = filter[1:]
else:
invert = False
try:
if filter[0:1] == "#":
priority = int(filter[1:], 10)
usePriority = True
except ValueError:
priority = 0
if usePriority:
if self.exactPriority:
if item.hasPriority(priority):
view = True
elif item.hasPriorityOrAbove(priority):
view = True
elif filter[0:2].upper() == ":D":
if item.hasDate(filter[2:]):
view = True
elif filter[0:2].upper() == ":P":
view = item.hasProject(filter)
elif filter[0:1].upper() == "@":
view = item.hasAction(filter)
elif item.hasWord(filter):
view = True
            if invert:
                view = not view
            if ored:
                if view:
                    overallView = True
                    break
            else:
                if not view:
                    overallView = False
                    if fast or mustHave:
                        break
            ored = False
return overallView
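    # Sketch of how filter strings combine here (the words are made-up
    # examples; parsing is done by setFilterArray above):
    #   "@Work :pHouse"  -> must match the @Work context AND project House
    #   "@Work OR @Home" -> either context is enough
    #   "-@Phone"        -> must NOT carry the @Phone context
    #   "#7"             -> effective priority 7, or 7-and-above unless
    #                       exactPriority is set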
def listByAction(self):
index = SearchIndex()
for item in self.todo:
index.addCollection(item.getActions())
index.sort()
(n, value) = index.getFirstItem()
print ruler
self.showFilter()
while n >= 0:
if not gColor.usingColor() and n > 0:
div = True
else:
div = False
self.setFilterArray(True, "+" + value)
self.printList(div, "<H2 class=\"hAction\">" + value + "</H2>\n", gColor.code("title") + "\n" + value + "\n" + gColor.code("title"))
self.clearFilterArray(True)
(n, value) = index.getNextItem(n)
print ruler
def listByProject(self):
index = SearchIndex()
for item in self.todo:
index.addCollection(item.getProjects())
index.sort()
(n, value) = index.getFirstItem()
print ruler
self.showFilter()
while n >= 0:
if not gColor.usingColor() and n > 0:
div = True
else:
div = False
self.setFilterArray(True, "+:p" + value)
self.printList(div, "<H2 class =\"hProject\">Project: " + value + "</H2>\n", gColor.code("title") + "\nProject: " + value + "\n" + gColor.code("normal"))
self.clearFilterArray(True)
(n, value) = index.getNextItem(n)
print ruler
def listByDate(self):
index = SearchIndex()
for item in self.todo:
index.add(item.getDate())
index.sort()
(n, value) = index.getFirstItem()
print ruler
self.showFilter()
while n >= 0:
if not gColor.usingColor() and n > 0:
div = True
else:
div = False
self.setFilterArray(True, "+:D" + value)
self.printList(div, "<H2 class =\"hDate\">Date: " + value + "</H2>\n", gColor.code("title") + "\nDate: " + value + "\n" + gColor.code("normal"))
self.clearFilterArray(True)
(n, value) = index.getNextItem(n)
print ruler
def showFilter(self):
if self.globalFilterText != "":
self.output("<H3 class =\"hFilter\">Filter = " + self.globalFilterText + "</H3>\n",
gColor.code("bold") + "Filter = " + self.globalFilterText + "\n" + gColor.code("normal"))
def showLocalFilter(self):
if self.localFilterText != "":
self.output("<H3 class =\"hFilter\">Filter = " + self.localFilterText + "</H3>\n",
gColor.code("bold") + "Filter = " + self.localFilterText + "\n" + gColor.code("normal"))
def printList(self, div, outHtml, outStd):
self.doPrintList(-1, div, outHtml, outStd)
def printShortList(self, line):
count = 0
try:
count = int(line, 10)
except ValueError:
self.showError("Didn't understand the number of tasks you wanted listed.")
self.doPrintList(count, False, "", "")
def doPrintList(self, limitItems, div, outHtml, outStd):
n = 0
displayed = 0
count = 0
color = "row0"
first = True
maxlines = 20
if outHtml != "":
self.outputHtml("<div class=\"itemGroup\">\n")
for item in self.todo:
if self.isViewable(item):
if first:
if div:
print divider
self.output(outHtml, outStd)
if not gColor.usingColor() and not first:
print divider
count = count + 1
count = count + self.printItem(n, color)
first = False
if color == "row0":
color = "row1"
else:
color = "row0"
displayed = displayed + 1
n = n + 1
if limitItems >= 0 and displayed >= limitItems:
break
if count >= maxlines:
if self.htmlFile == "":
msg = safeRawInput("---press Enter for more. Enter s to skip: ")
if len(msg) > 0 and msg.strip().upper()[0] == "S":
                        break
count = 0
if outHtml != "":
self.outputHtml("</div>\n")
def printHelp(self, lines):
ListViewer(24).show(lines,"!PAUSE!")
def splitFile(self, filename, dataOnly):
inData = False
inCode = False
inCfg = False
f = open(filename, 'r')
line = f.readline()
if line[0:2] == "#!":
line = f.readline()
while line != "":
if line.find(magicTag + "DATA") == 0:
inData = True
inCode = False
inCfg = False
elif line.find(magicTag + "CONFIG") == 0:
inData = False
inCode = False
inCfg = True
elif line.find(magicTag + "CODE") == 0:
inCode = True
inData = False
inCfg = False
if dataOnly:
break
elif inCode:
self.code.append(line)
elif inCfg:
self.processCfgLine(line)
elif inData:
line = line.strip()
if len(line) > 0 and line[0] == "#":
line = line[1:].strip()
if len(line) > 0:
newItem = self.createItem(line)
newItem.getError()
self.todo.append(newItem)
line = f.readline()
f.close()
def createItem(self, line, password = ""):
item = TodoItem(line, password)
return item
def outputHtml(self, html):
if self.htmlFile != "":
if usePlugin:
self.htmlFile.write(ikogPlugin.modifyFileOutputHtml(html))
else:
self.htmlFile.write(html)
def output(self, html, stdout):
if self.htmlFile != "":
#self.htmlFile.write(html.replace("\n", "<br>\n"))
if usePlugin:
self.htmlFile.write(ikogPlugin.modifyFileOutput(html))
else:
self.htmlFile.write(html)
if stdout == 0:
if usePlugin:
print ikogPlugin.modifyOutput(html),
else:
print html,
else:
if usePlugin:
print ikogPlugin.modifyOutput(stdout),
else:
print stdout,
def startHtml(self, title):
htmlFilename = self.filename + ".html"
try:
printHeader = True
self.htmlFile = open(htmlFilename, "w")
if usePlugin:
printHeader = ikogPlugin.showHeader()
if printHeader:
self.htmlFile.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n")
self.htmlFile.write("<META HTTP-EQUIV=\"Content-Type\" CONTENT=\"text/html; charset=UTF-8\">\n")
self.htmlFile.write("<html>\n<head>\n")
self.htmlFile.write("<style>\n")
self.htmlFile.write(".footer {text-align:center;}\n")
self.htmlFile.write("</style>\n")
self.htmlFile.write("<link rel=\"stylesheet\" href=\"ikog.css\" type=\"text/css\">\n")
self.htmlFile.write("</head>\n<body>\n")
self.htmlFile.write("<div class=\"header\">\n")
self.htmlFile.write("<H1 class=\"hTitle\">iKog Todo List</H1>\n")
self.htmlFile.write("<H2 class=\"hSubTitle\">" + title + " printed " + date.today().isoformat() + "</H1>\n")
self.htmlFile.write("</div>\n")
self.htmlFile.write("<div class=\"taskArea\">\n")
if usePlugin:
self.htmlFile.write(ikogPlugin.postHtmlHeader())
except Exception:
print "Failed to create output file:", htmlFilename
self.htmlFile = ""
def endHtml(self):
name = self.htmlFile.name
success = False
try:
printFooter = True
if usePlugin:
self.htmlFile.write(ikogPlugin.preHtmlFooter())
printFooter = ikogPlugin.showFooter()
if printFooter:
self.htmlFile.write("</div>\n")
self.htmlFile.write("<div class=\"footer\">\n")
self.htmlFile.write("--- end of todo list ---<br>\n")
self.htmlFile.write("Created using " + notice[0] + "\n<br>" + notice[1] + "<br>\n")
self.htmlFile.write("</div>\n")
self.htmlFile.write("</body>\n</html>\n")
self.htmlFile.close()
self.htmlFile = ""
print "HTML file " + name + " created."
success = True
except Exception, e:
self.showError("Error writing to file. " + str(e))
if success:
try:
safeName = os.path.abspath(name).replace("\\","/")
safeName = "file://" + urllib.quote(safeName," /:")
webbrowser.open(safeName)
except Exception, e:
self.showError("Unable to launch html output. " + str(e))
class Abbreviations:
def __init__(self, project = False):
self.default(project)
def default(self,project):
if project:
self.abbrevs = {}
else:
self.abbrevs = {"@A":"@Anywhere","@C":"@Computer",
"@D":"@Desk", "@E": "@Errands",
"@H":"@Home", "@I":"@Internet","@L":"@Lunch", "@M":"@Meeting", "@N":"@Next",
"@P":"@Phone", "@Pw":"@Password", "@S":"@Someday/Maybe",
"@O":"@Other", "@W4":"@Waiting_For", "@W":"@Work"}
def setAbbreviations(self, abbr):
self.abbrevs.update(abbr)
def addAbbreviation(self, key, word):
self.abbrevs.update({key.title():word})
def removeAbbreviation(self, key):
key = key.title()
if self.abbrevs.has_key(key):
del self.abbrevs[key]
return True
return False
def expandAction(self, action):
if action[0:1] != "@":
return action
action = action.title()
if self.abbrevs.has_key(action):
return self.abbrevs[action]
return action
def expandProject(self, project):
if not project.lower().startswith(":p"):
return project
project = project.title()
if self.abbrevs.has_key(project):
return self.abbrevs[project]
return project
def toString(self):
return str(self.abbrevs)
def toStringVerbose(self):
output = ""
index = 0
for key in self.abbrevs:
output = output + key.ljust(5) + " = " + self.abbrevs[key].ljust(30)
index = index + 1
if index % 2 == 0:
output = output + "\n"
if index % 2 != 0:
output = output + "\n"
return output
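# Illustrative use of the default abbreviation table above:
#   abbr = Abbreviations()
#   abbr.expandAction("@c")   -> "@Computer" (keys are normalised with
#                                             str.title(), so case is free)
#   abbr.expandAction("@Gym") -> "@Gym"      (unknown contexts pass through)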
class SearchIndex:
def __init__(self):
self.items = []
def add(self, ent):
if ent != "" and not ent in self.items:
self.items.append(ent)
def addCollection(self, collection):
for ent in collection:
if ent != "" and not ent in self.items:
self.items.append(ent)
def sort(self):
self.items.sort()
def getFirstItem(self):
if len(self.items) > 0:
return (0, self.items[0])
else:
return (-1, "")
def getNextItem(self, count):
count = count + 1
if count > len(self.items) - 1:
return (-1, "")
else:
return (count, self.items[count])
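# The listBy* reports above walk a SearchIndex with this idiom (sketch):
#   index = SearchIndex()
#   index.addCollection(["@Home", "@Work"])
#   index.sort()
#   (n, value) = index.getFirstItem()
#   while n >= 0:
#       ...render the group for value...
#       (n, value) = index.getNextItem(n)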
class TodoItem:
ENCRYPTION_MARKER = "{}--xx"
REPLACE = 0
MODIFY = 1
APPEND = 2
NOT_DUE_PRIORITY = 0
DEFAULT_PRIORITY = 5
OVERDUE_PRIORITY = 11
MEETING_PRIORITY = 10
def __init__(self,line, password = ""):
self.actions = []
self.task = ""
self.hiddenTask = ""
self.projects = []
self.priority = -1
self.when = ""
self.created = date.today().isoformat()
self.error = ""
self.autoAction = False
self.autoProject = False
self.nullDate = False
self.parse(line, password)
def makeSafeDate(self, year, month, day):
done = False
newDate = ""
while not done:
if day < 1:
done = True
else:
try:
newDate = date(year, month, day)
done = True
except ValueError:
day = day - 1
newDate = ""
return newDate
def parseDate(self, dateStr, quiet):
dateStr = dateStr.replace("/","-")
dateStr = dateStr.replace(":","-")
entry = dateStr.split("-")
n = len(entry)
if n < 1 or n > 3:
fail = True
elif dateStr == "0":
self.nullDate = True
return ""
else:
try:
now = date.today()
if dateStr[0:1] == "+":
days = int(dateStr[1:].strip(), 10)
when = now + timedelta(days)
else:
if n == 3:
year = int(entry[0], 10)
month = int(entry[1], 10)
day = int(entry[2], 10)
elif n == 2:
year = now.year
month = int(entry[0], 10)
day = int(entry[1], 10)
else:
year = now.year
month = now.month
day = int(entry[0], 10)
if day < now.day:
month = month + 1
if month > 12:
month = 1
year = year + 1
if year < 1000:
year = year + 2000
when = self.makeSafeDate(year, month, day)
if when == "":
fail = True
else:
fail = False
self.nullDate = False
except ValueError:
fail = True
except:
fail = True
if fail:
self.addError("Could not decode the date. Use :dYYYY/MM/DD")
return ""
else:
return when.isoformat()
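    # Date forms accepted above, as the user would type them (illustrative;
    # assume today is 2010-06-15):
    #   :d+7         -> one week from today           -> 2010-06-22
    #   :d2010-12-25 -> explicit year-month-day       -> 2010-12-25
    #   :d12-25      -> month-day in the current year -> 2010-12-25
    #   :d20         -> day only, current month       -> 2010-06-20
    #   :d0          -> clears the task's date (nullDate)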
def parse(self, line, password):
self.error = ""
words = line.split(" ")
taskToHide = ""
encrypt = ""
start = 0
ecmLen = len(self.ENCRYPTION_MARKER)
for word in words[start:]:
wordUC = word.strip().upper()
if len(word) > 0:
if encrypt != "":
taskToHide = taskToHide + word.strip() + " "
elif word[0:ecmLen] == self.ENCRYPTION_MARKER:
self.hiddenTask = word[ecmLen:]
elif wordUC.startswith("<PRIVATE>") or wordUC.startswith("<SECRET>") or wordUC.startswith("<S>") or wordUC.startswith("<P>"):
encrypt = wordUC[1]
try:
pos = word.index(">")
taskToHide = taskToHide + word[pos + 1:].strip() + " "
except ValueError:
pass
elif word[0] == "@" and len(word) > 1:
if wordUC == "@DATE":
self.addError("@Date contexts should not be entered. Use :dYYYY-MM-DD")
else:
act = globalAbbr.expandAction(word.strip())
if not act in self.actions:
self.actions.append(act)
elif word[0:1] == "#" and len(word) > 1 and self.priority == -1:
try:
self.priority = int(word[1:].strip(), 10)
if self.priority < 1:
self.priority = 0
elif self.priority > 10:
self.priority = 10
except ValueError:
self.addError("Did not understand priority.")
self.priority = -1
elif wordUC[0:2] == ":P" and len(word) > 2:
proj = globalPAbbr.expandProject(word.strip())[2:].title()
if not proj in self.projects:
self.projects.append(proj)
elif wordUC[0:8] == ":CREATED" and len(word) > 8:
self.created = word[8:].strip()
elif wordUC[0:2] == ":D" and len(word) > 2:
self.when = self.parseDate(word[2:].strip(), False)
else:
self.task = self.task + word.strip() + " "
if taskToHide != "":
ec = Encryptor()
if encrypt == "S":
if ec.setType(ec.TYPE_AES) != ec.TYPE_AES:
self.addError("AES encryption is not available.")
taskToHide = ""
else:
ec.setType(ec.TYPE_OBSCURED)
if taskToHide != "":
if password == "":
self.hiddenTask = ec.enterKeyAndEncrypt(taskToHide)
else:
ec.setKey(password)
self.hiddenTask = ec.encrypt(taskToHide)
if len(self.actions) == 0:
self.actions.append("@Anywhere")
self.autoAction = True
if len(self.projects) == 0:
self.projects.append("None")
self.autoProject = True
def addError(self, err):
if len(self.error) > 0:
self.error = self.error + "\n"
self.error = self.error + err
def hasError(self):
return self.error != ""
def getError(self):
tmp = self.error
self.error = ""
return tmp
def hasWord(self, word):
return (self.task.upper().find(word.upper()) >= 0)
def hasAction(self, loc):
if self.when != "" and loc.upper() == "@DATE":
return True
else:
return loc.title() in self.actions
def copy(self, todoItem, replace):
if replace == TodoItem.REPLACE or len(todoItem.task.strip()) > 0:
if replace == TodoItem.APPEND:
self.task = self.task + " ..." + todoItem.task
else:
self.task = todoItem.task
if replace == TodoItem.REPLACE or todoItem.autoAction == False:
if replace != TodoItem.APPEND:
self.actions = []
for loc in todoItem.actions:
if not loc in self.actions:
self.actions.append(loc)
if replace == TodoItem.REPLACE or todoItem.autoProject == False:
if replace != TodoItem.APPEND:
self.projects = []
for proj in todoItem.projects:
if not proj in self.projects:
self.projects.append(proj)
if replace == TodoItem.REPLACE or (todoItem.when != "" or todoItem.nullDate == True):
self.when = todoItem.when
if todoItem.priority >= 0:
self.priority = todoItem.priority
if replace == TodoItem.REPLACE or len(todoItem.hiddenTask.strip()) > 0:
if replace == TodoItem.APPEND:
pass
else:
self.hiddenTask = todoItem.hiddenTask
def hasHiddenTask(self):
return self.hiddenTask != ""
def hasTask(self):
return len(self.task.strip()) > 0
def hasProject(self, proj):
if proj[0:2].upper() == ":P":
proj = proj[2:]
return proj.title() in self.projects
def hasDate(self, dt):
dt = self.parseDate(dt, True)
if dt == "":
return False
else:
return self.when == dt
def hasPriorityOrAbove(self, priority):
return (self.getEffectivePriority() >= priority)
def hasPriority(self, priority):
return (self.getEffectivePriority() == priority)
def getHiddenTask(self):
return self.hiddenTask
def getTask(self):
return self.task
def getActions(self):
if self.when == "":
return self.actions
else:
return self.actions + ["@Date"]
def getProjects(self):
return self.projects
def getDate(self):
return self.when
def getPriority(self):
if self.priority < 0:
return self.DEFAULT_PRIORITY
else:
return self.priority
def getEffectivePriority(self):
userP = self.getPriority()
if self.when != "":
if self.when <= date.today().isoformat():
userP = self.OVERDUE_PRIORITY + userP
if self.hasAction("@Meeting"):
userP = userP + self.MEETING_PRIORITY
else:
userP = self.NOT_DUE_PRIORITY
return userP
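    # Worked example of the boosts above: a task with user priority 5 that
    # is due today scores 11 + 5 = 16 (OVERDUE_PRIORITY + user priority);
    # if it also carries @Meeting it gains a further +10, giving 26. A
    # dated task that is not yet due is parked at NOT_DUE_PRIORITY (0).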
def toString(self):
entry = "#"
entry = entry + " " + self.task
for action in self.actions:
entry = entry + " " + action
for project in self.projects:
entry = entry + " :p" + project
entry = entry + " :created" + self.created
if self.when != "":
entry = entry + " :d" + self.when
if self.priority >= 0:
entry = entry + " #" + str(self.priority)
if self.hiddenTask != "":
entry = entry + " " + self.ENCRYPTION_MARKER + self.hiddenTask
return entry
def toStringEditable(self, includeHidden = False):
password = ""
entry = ""
if self.when != "":
entry = entry + ":d" + self.when + " "
entry = entry + "%s #%d" % (self.task, self.getPriority())
if len(self.actions) > 0:
for action in self.actions:
entry = entry + " " + action
if len(self.projects) > 0:
for project in self.projects:
# skip the none tag
if project != "None":
entry = entry + " :p" + project
if self.hiddenTask != "" and includeHidden:
ec = Encryptor()
entry = entry + " <" + Encryptor().getSecurityClass(self.hiddenTask)[0:1] + ">"
entry = entry + ec.enterKeyAndDecrypt(self.hiddenTask)
password = ec.getKey()
return (password, entry.strip())
def toStringSimple(self):
entry = ""
if self.when != "":
entry = entry + "@Date " + self.when + " "
entry = entry + "%s #%d" % (self.task, self.getPriority())
if self.hiddenTask != "":
entry = entry + " <*** " + Encryptor().getSecurityClass(self.hiddenTask) + " ***> "
if len(self.actions) > 0:
for action in self.actions:
entry = entry + " " + action
if len(self.projects) > 0:
first = True
for project in self.projects:
# skip the none tag
if project != "None":
if first:
entry = entry + " Projects: " + project
first = False
else:
entry = entry + ", " + project
entry = entry + " [" + self.created + "]"
return entry
def toStringVerbose(self):
entry = gColor.code("title") + self.task
if self.hiddenTask != "":
entry = entry + " <*** " + Encryptor().getSecurityClass(self.hiddenTask) + " ***> "
entry = entry + gColor.code("bold") + "\nPriority: %02d" % (self.getPriority())
        if len(self.actions) > 0 or self.when != "":
entry = entry + gColor.code("heading") + "\nContext: "
if self.when != "":
entry = entry + gColor.code("important") + "@Date " + self.when
entry = entry + gColor.code("normal")
for action in self.actions:
entry = entry + " " + action;
if len(self.projects) > 0:
first = True
for project in self.projects:
if project != "None":
if first:
entry = entry + gColor.code("heading") + "\nProjects: " + gColor.code("normal");
entry = entry + project
first = False
else:
entry = entry + ", " + project
entry = entry + gColor.code("normal") + "\nCreated: [" + self.created + "]"
return entry
### Entry point
for line in notice:
print line
pythonVer = platform.python_version()
ver = pythonVer.split(".")
if int(ver[0]) < gReqPythonMajor or (int(ver[0]) == gReqPythonMajor and int(ver[1]) < gReqPythonMinor):
print "\nSorry but this program requires Python ", \
str(gReqPythonMajor) + "." + str(gReqPythonMinor), \
"\nYour current version is ", \
str(ver[0]) + "." + str(ver[1]), \
"\nTo run the program you will need to install the current version of Python."
else:
import webbrowser
# signal.signal(signal.SIGINT, signalHandler)
gColor = ColorCoder(cfgColor)
globalAbbr = Abbreviations()
globalPAbbr = Abbreviations(project=True)
commandList = []
if len(sys.argv) > 2:
command = ""
reopen = sys.argv[1]
if reopen == ".":
reopen = sys.argv[0] + ".dat"
for word in sys.argv[2:]:
if word == "/":
commandList.append(command)
command = ""
else:
command = command + word + " "
commandList.append(command)
elif len(sys.argv) > 1:
reopen = sys.argv[1]
else:
reopen = sys.argv[0] + ".dat"
if usePlugin:
ruler = ikogPlugin.getRuler()
divider = ikogPlugin.getDivider()
while reopen != "":
print commandList
todoList = TodoList(sys.argv[0], reopen)
reopen = todoList.run(commandList)
commandList = []
print "Goodbye"
|
janosgyerik/gtd-cli-py
|
ikog.py
|
Python
|
gpl-3.0
| 101,858
|
[
"VisIt"
] |
d8d8919f3f987043af09279b17c7a081e6f058e2de7204dd09efee632a317041
|
""" Contains the ModeController class."""
# mode_controller.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
from collections import namedtuple
from mpf.system.config import Config
from mpf.system.utility_functions import Util
RemoteMethod = namedtuple('RemoteMethod', 'method config_section kwargs priority',
verbose=False)
"""RemotedMethod is used by other modules that want to register a method to
be called on mode_start or mode_stop.
"""
# Need to define RemoteMethod before importing Mode since the mode module
# imports it. This breaks the usual import ordering; TODO: find a cleaner
# way to do this.
from mpf.system.mode import Mode
class ModeController(object):
"""Parent class for the Mode Controller. There is one instance of this in
MPF and it's responsible for loading, unloading, and managing all modes.
Args:
machine: The main MachineController instance.
"""
debug_path = 'system_modules|mode_controller'
def __init__(self, machine):
self.machine = machine
self.log = logging.getLogger('ModeController')
self.debug = True
self.queue = None # ball ending event queue
self.active_modes = list()
self.mode_stop_count = 0
# The following two lists hold namedtuples of any remote components that
# need to be notified when a mode object is created and/or started.
self.loader_methods = list()
self.start_methods = list()
if 'modes' in self.machine.config:
self.machine.events.add_handler('init_phase_4',
self._load_modes)
self.machine.events.add_handler('ball_ending', self._ball_ending,
priority=0)
self.machine.events.add_handler('ball_starting', self._ball_starting,
priority=0)
self.machine.events.add_handler('player_add_success',
self._player_added, priority=0)
self.machine.events.add_handler('player_turn_start',
self._player_turn_start,
priority=1000000)
self.machine.events.add_handler('player_turn_stop',
self._player_turn_stop,
priority=1000000)
def _load_modes(self):
        # Loads the modes from the 'modes:' section of the machine
        # configuration file.
for mode in set(self.machine.config['modes']):
self.machine.modes.append(self._load_mode(mode))
def _load_mode(self, mode_string):
"""Loads a mode, reads in its config, and creates the Mode object.
Args:
            mode_string: String name of the mode you're loading. This is the
                name of the mode's folder in your game's machine_files/modes
                folder.
"""
if self.debug:
self.log.debug('Processing mode: %s', mode_string)
config = dict()
# find the folder for this mode:
mode_path = os.path.join(self.machine.machine_path,
self.machine.config['mpf']['paths']['modes'], mode_string)
if not os.path.exists(mode_path):
mode_path = os.path.abspath(os.path.join('mpf', self.machine.config['mpf']['paths']['modes'], mode_string))
# Is there an MPF default config for this mode? If so, load it first
mpf_mode_config = os.path.join(
'mpf',
self.machine.config['mpf']['paths']['modes'],
mode_string,
'config',
mode_string + '.yaml')
if os.path.isfile(mpf_mode_config):
config = Config.load_config_file(mpf_mode_config)
# Now figure out if there's a machine-specific config for this mode, and
# if so, merge it into the config
mode_config_folder = os.path.join(self.machine.machine_path,
self.machine.config['mpf']['paths']['modes'], mode_string, 'config')
found_file = False
for path, _, files in os.walk(mode_config_folder):
for file in files:
file_root, file_ext = os.path.splitext(file)
if file_root == mode_string:
config = Util.dict_merge(config,
Config.load_config_file(os.path.join(path, file)))
found_file = True
break
if found_file:
break
if 'code' in config['mode']:
# need to figure out if this mode code is in the machine folder or
# the default mpf folder
mode_code_file = os.path.join(self.machine.machine_path,
self.machine.config['mpf']['paths']['modes'],
mode_string,
'code',
config['mode']['code'].split('.')[0] + '.py')
if os.path.isfile(mode_code_file): # code is in the machine folder
import_str = (self.machine.config['mpf']['paths']['modes'] +
'.' + mode_string + '.code.' +
config['mode']['code'].split('.')[0])
i = __import__(import_str, fromlist=[''])
if self.debug:
self.log.debug("Loading Mode class code from %s",
mode_code_file)
mode_object = getattr(i, config['mode']['code'].split('.')[1])(
self.machine, config, mode_string, mode_path)
else: # code is in the mpf folder
import_str = ('mpf.' +
self.machine.config['mpf']['paths']['modes'] +
'.' + mode_string + '.code.' +
config['mode']['code'].split('.')[0])
i = __import__(import_str, fromlist=[''])
if self.debug:
self.log.debug("Loading Mode class code from %s",
import_str)
mode_object = getattr(i, config['mode']['code'].split('.')[1])(
self.machine, config, mode_string, mode_path)
else: # no code specified, so using the default Mode class
if self.debug:
self.log.debug("Loading default Mode class code")
mode_object = Mode(self.machine, config, mode_string, mode_path)
return mode_object
def _player_added(self, player, num):
player.uvars['_restart_modes_on_next_ball'] = list()
def _player_turn_start(self, player, **kwargs):
for mode in self.machine.modes:
mode.player = player
def _player_turn_stop(self, player, **kwargs):
for mode in self.machine.modes:
mode.player = None
def _ball_starting(self, queue):
for mode in self.machine.game.player.uvars['_restart_modes_on_next_ball']:
self.log.debug("Restarting mode %s based on 'restart_on_next_ball"
"' setting", mode)
mode.start()
self.machine.game.player.uvars['_restart_modes_on_next_ball'] = list()
def _ball_ending(self, queue):
# unloads all the active modes
if not self.active_modes:
            return
self.queue = queue
self.queue.wait()
self.mode_stop_count = 0
for mode in self.active_modes:
if mode.auto_stop_on_ball_end:
self.mode_stop_count += 1
mode.stop(callback=self._mode_stopped_callback)
if mode.restart_on_next_ball:
self.log.debug("Will Restart mode %s on next ball, mode")
self.machine.game.player.uvars[
'_restart_modes_on_next_ball'].append(mode)
if not self.mode_stop_count:
self.queue.clear()
def _mode_stopped_callback(self):
self.mode_stop_count -= 1
if not self.mode_stop_count:
self.queue.clear()
def register_load_method(self, load_method, config_section_name=None,
priority=0, **kwargs):
"""Used by system components, plugins, etc. to register themselves with
the Mode Controller for anything they need a mode to do when it's
registered.
Args:
load_method: The method that will be called when this mode code
loads.
config_section_name: An optional string for the section of the
configuration file that will be passed to the load_method when
it's called.
priority: Int of the relative priority which allows remote methods
to be called in a specific order. Default is 0. Higher values
will be called first.
**kwargs: Any additional keyword arguments specified will be passed
to the load_method.
Note that these methods will be called once, when the mode code is first
initialized during the MPF boot process.
"""
self.loader_methods.append(RemoteMethod(method=load_method,
config_section=config_section_name, kwargs=kwargs,
priority=priority))
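    # A minimal usage sketch (the component name, config section name and
    # callback signature here are hypothetical, for illustration only):
    #
    #   def configure_widgets(config, mode, **kwargs):
    #       ...process the mode's 'widgets' config section...
    #
    #   machine.mode_controller.register_load_method(
    #       configure_widgets, config_section_name='widgets', priority=100)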
def register_start_method(self, start_method, config_section_name=None,
priority=0, **kwargs):
"""Used by system components, plugins, etc. to register themselves with
        the Mode Controller for anything they need a mode to do when it starts.
Args:
start_method: The method that will be called when this mode code
loads.
config_section_name: An optional string for the section of the
configuration file that will be passed to the start_method when
it's called.
priority: Int of the relative priority which allows remote methods
to be called in a specific order. Default is 0. Higher values
will be called first.
**kwargs: Any additional keyword arguments specified will be passed
to the start_method.
Note that these methods will be called every single time this mode is
started.
"""
if self.debug:
self.log.debug('Registering %s as a mode start method. Config section:'
'%s, priority: %s, kwargs: %s', start_method,
config_section_name, priority, kwargs)
self.start_methods.append(RemoteMethod(method=start_method,
config_section=config_section_name, priority=priority,
kwargs=kwargs))
self.start_methods.sort(key=lambda x: x.priority, reverse=True)
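    # Usage mirrors register_load_method but fires on every mode start; the
    # reverse sort above means higher-priority registrations run first.
    # Sketch (hypothetical names):
    #   machine.mode_controller.register_start_method(
    #       enable_shots, config_section_name='shots', priority=200)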
def _active_change(self, mode, active):
# called when a mode goes active or inactive
if active:
self.active_modes.append(mode)
else:
self.active_modes.remove(mode)
# sort the active mode list by priority
self.active_modes.sort(key=lambda x: x.priority, reverse=True)
self.dump()
def dump(self):
"""Dumps the current status of the running modes to the log file."""
self.log.info('+=========== ACTIVE MODES ============+')
for mode in self.active_modes:
if mode.active:
self.log.info('| {} : {}'.format(mode.name,
mode.priority).ljust(38) + '|')
self.log.info('+-------------------------------------+')
    def is_active(self, mode_name):
        """Returns True if a mode with the given name is currently active."""
        return mode_name in [x.name for x in self.active_modes
                             if x._active is True]
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
qcapen/mpf
|
mpf/system/mode_controller.py
|
Python
|
mit
| 13,060
|
[
"Brian"
] |
f8d2c229faef888b10b1e2cee781ca8c2ebaccc03ba8de2be9679da1c26f2fcc
|
# coding=utf-8
from ladybug.epw import EPW
from ladybug.datacollection import HourlyContinuousCollection, MonthlyCollection
from ladybug.designday import DesignDay
from ladybug.analysisperiod import AnalysisPeriod
import os
import pytest
def test_import_epw():
"""Test import standard epw."""
relative_path = './tests/assets/epw/chicago.epw'
abs_path = os.path.abspath(relative_path)
epw_rel = EPW(relative_path)
epw = EPW(abs_path)
assert epw_rel.file_path == os.path.normpath(relative_path)
assert epw_rel.location.city == 'Chicago Ohare Intl Ap'
assert epw.file_path == abs_path
assert epw.location.city == 'Chicago Ohare Intl Ap'
# Check that calling location getter only retrieves location
assert not epw.is_data_loaded
dbt = epw.dry_bulb_temperature
skyt = epw.sky_temperature # test sky temperature calculation
assert epw.is_data_loaded
assert len(dbt) == 8760
assert len(skyt) == 8760
assert epw.ashrae_climate_zone == '5A'
def test_import_tokyo_epw():
"""Test import standard epw from another location."""
path = './tests/assets/epw/tokyo.epw'
epw = EPW(path)
assert not epw.is_header_loaded
assert epw.location.city == 'Tokyo'
assert epw.is_header_loaded
assert not epw.is_data_loaded
dbt = epw.dry_bulb_temperature
assert epw.is_data_loaded
assert len(dbt) == 8760
assert epw.ashrae_climate_zone == '3A'
def test_epw_from_file_string():
"""Test initialization of EPW from a file string."""
relative_path = './tests/assets/epw/chicago.epw'
with open(relative_path, 'r') as epwin:
file_contents = epwin.read()
epw = EPW.from_file_string(file_contents)
assert epw.is_header_loaded
assert epw.is_data_loaded
assert len(epw.dry_bulb_temperature) == 8760
def test_epw_from_missing_values():
"""Test initialization of EPW from missing values."""
epw = EPW.from_missing_values()
assert epw.is_header_loaded
assert epw.is_data_loaded
assert len(epw.dry_bulb_temperature) == 8760
assert list(epw.dry_bulb_temperature.values) == [99.9] * 8760
day_vals = epw.import_data_by_field(2)
assert day_vals[24] == 1
def test_dict_methods():
"""Test JSON serialization methods"""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
epw_dict = epw.to_dict()
rebuilt_epw = EPW.from_dict(epw_dict)
assert epw_dict == rebuilt_epw.to_dict()
def test_file_string_methods():
"""Test serialization to/from EPW file strings"""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
epw_contents = epw.to_file_string()
rebuilt_epw = EPW.from_file_string(epw_contents)
assert epw.location == rebuilt_epw.location
assert epw.dry_bulb_temperature == rebuilt_epw.dry_bulb_temperature
def test_invalid_epw():
"""Test the import of incorrect file type and a non-existent epw file."""
path = './tests/assets/epw/non-existent.epw'
with pytest.raises(Exception):
epw = EPW(path)
epw.location
path = './tests/assets/stat/chicago.stat'
with pytest.raises(Exception):
epw = EPW(path)
epw.location
def test_import_data():
"""Test the imported data properties."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
assert isinstance(epw.years, HourlyContinuousCollection)
assert isinstance(epw.dry_bulb_temperature, HourlyContinuousCollection)
assert isinstance(epw.dew_point_temperature, HourlyContinuousCollection)
assert isinstance(epw.relative_humidity, HourlyContinuousCollection)
assert isinstance(epw.atmospheric_station_pressure, HourlyContinuousCollection)
assert isinstance(epw.extraterrestrial_horizontal_radiation, HourlyContinuousCollection)
assert isinstance(epw.extraterrestrial_direct_normal_radiation, HourlyContinuousCollection)
assert isinstance(epw.horizontal_infrared_radiation_intensity, HourlyContinuousCollection)
assert isinstance(epw.global_horizontal_radiation, HourlyContinuousCollection)
assert isinstance(epw.direct_normal_radiation, HourlyContinuousCollection)
assert isinstance(epw.diffuse_horizontal_radiation, HourlyContinuousCollection)
assert isinstance(epw.global_horizontal_illuminance, HourlyContinuousCollection)
assert isinstance(epw.direct_normal_illuminance, HourlyContinuousCollection)
assert isinstance(epw.diffuse_horizontal_illuminance, HourlyContinuousCollection)
assert isinstance(epw.zenith_luminance, HourlyContinuousCollection)
assert isinstance(epw.wind_direction, HourlyContinuousCollection)
assert isinstance(epw.wind_speed, HourlyContinuousCollection)
assert isinstance(epw.total_sky_cover, HourlyContinuousCollection)
assert isinstance(epw.opaque_sky_cover, HourlyContinuousCollection)
assert isinstance(epw.visibility, HourlyContinuousCollection)
assert isinstance(epw.ceiling_height, HourlyContinuousCollection)
assert isinstance(epw.present_weather_observation, HourlyContinuousCollection)
assert isinstance(epw.present_weather_codes, HourlyContinuousCollection)
assert isinstance(epw.precipitable_water, HourlyContinuousCollection)
assert isinstance(epw.aerosol_optical_depth, HourlyContinuousCollection)
assert isinstance(epw.snow_depth, HourlyContinuousCollection)
assert isinstance(epw.days_since_last_snowfall, HourlyContinuousCollection)
assert isinstance(epw.albedo, HourlyContinuousCollection)
assert isinstance(epw.liquid_precipitation_depth, HourlyContinuousCollection)
assert isinstance(epw.liquid_precipitation_quantity, HourlyContinuousCollection)
assert isinstance(epw.sky_temperature, HourlyContinuousCollection)
def test_convert_to_ip():
"""Test the method that converts the data to IP units."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
assert epw.dry_bulb_temperature.header.unit == 'C'
assert epw.dry_bulb_temperature.values[0] == -6.1
epw.convert_to_ip()
assert epw.dry_bulb_temperature.header.unit == 'F'
assert epw.dry_bulb_temperature.values[0] == pytest.approx(21.02, rel=1e-2)
epw.convert_to_si()
assert epw.dry_bulb_temperature.header.unit == 'C'
assert epw.dry_bulb_temperature.values[0] == pytest.approx(-6.1, rel=1e-5)
def test_set_data():
"""Test the ability to set the data of any of the epw hourly data."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
epw.dry_bulb_temperature[12] = 20
assert epw.dry_bulb_temperature[12] == 20
epw.dry_bulb_temperature.values = list(range(8760))
assert epw.dry_bulb_temperature.values == tuple(range(8760))
# Test if the set data is not annual
with pytest.raises(Exception):
epw.dry_bulb_temperature = list(range(365))
def test_import_design_conditions():
"""Test the functions that import design conditions."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
assert isinstance(epw.heating_design_condition_dictionary, dict)
assert len(epw.heating_design_condition_dictionary.keys()) == 15
assert isinstance(epw.cooling_design_condition_dictionary, dict)
assert len(epw.cooling_design_condition_dictionary.keys()) == 32
assert isinstance(epw.extreme_design_condition_dictionary, dict)
assert len(epw.extreme_design_condition_dictionary.keys()) == 16
def test_set_design_conditions():
"""Test the functions that set design conditions."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
heat_dict = dict(epw.heating_design_condition_dictionary)
heat_dict['DB996'] = -25
epw.heating_design_condition_dictionary = heat_dict
assert epw.heating_design_condition_dictionary['DB996'] == -25
# Check for when the dictionary has a missing key
wrong_dict = dict(heat_dict)
del wrong_dict['DB996']
with pytest.raises(Exception):
epw.heating_design_condition_dictionary = wrong_dict
# Check for when the wrong type is assigned
heat_list = list(epw.heating_design_condition_dictionary.keys())
with pytest.raises(Exception):
epw.heating_design_condition_dictionary = heat_list
cool_dict = dict(epw.cooling_design_condition_dictionary)
cool_dict['DB004'] = 40
epw.cooling_design_condition_dictionary = cool_dict
assert epw.cooling_design_condition_dictionary['DB004'] == 40
extremes_dict = dict(epw.extreme_design_condition_dictionary)
extremes_dict['WS010'] = 20
epw.extreme_design_condition_dictionary = extremes_dict
assert epw.extreme_design_condition_dictionary['WS010'] == 20
def test_import_design_days():
"""Test the functions that import design days."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
assert isinstance(epw.annual_heating_design_day_996, DesignDay)
assert epw.annual_heating_design_day_996.dry_bulb_condition.dry_bulb_max == -20.0
assert isinstance(epw.annual_heating_design_day_990, DesignDay)
assert epw.annual_heating_design_day_990.dry_bulb_condition.dry_bulb_max == -16.6
assert isinstance(epw.annual_cooling_design_day_004, DesignDay)
assert epw.annual_cooling_design_day_004.dry_bulb_condition.dry_bulb_max == 33.3
assert isinstance(epw.annual_cooling_design_day_010, DesignDay)
assert epw.annual_cooling_design_day_010.dry_bulb_condition.dry_bulb_max == 31.6
def test_import_extreme_weeks():
"""Test the functions that import the extreme weeks."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
ext_cold = list(epw.extreme_cold_weeks.values())[0]
ext_hot = list(epw.extreme_hot_weeks.values())[0]
assert isinstance(ext_cold, AnalysisPeriod)
assert len(ext_cold.doys_int) == 7
assert (ext_cold.st_month, ext_cold.st_day, ext_cold.end_month,
ext_cold.end_day) == (1, 27, 2, 2)
assert isinstance(ext_hot, AnalysisPeriod)
assert len(ext_hot.doys_int) == 7
assert (ext_hot.st_month, ext_hot.st_day, ext_hot.end_month,
ext_hot.end_day) == (7, 13, 7, 19)
def test_import_typical_weeks():
"""Test the functions that import the typical weeks."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
typ_weeks = list(epw.typical_weeks.values())
assert len(typ_weeks) == 4
for week in typ_weeks:
assert isinstance(week, AnalysisPeriod)
assert len(week.doys_int) == 7
def test_set_extreme_typical_weeks():
"""Test the functions that set the extreme and typical weeks."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
a_per_cold = AnalysisPeriod(1, 1, 0, 1, 7, 23)
a_per_hot = AnalysisPeriod(7, 1, 0, 7, 7, 23)
a_per_typ = AnalysisPeriod(5, 1, 0, 5, 7, 23)
epw.extreme_cold_weeks = {'Extreme Cold Week': a_per_cold}
epw.extreme_hot_weeks = {'Extreme Hot Week': a_per_hot}
epw.typical_weeks = {'Typical Week': a_per_typ}
assert list(epw.extreme_cold_weeks.values())[0] == a_per_cold
assert list(epw.extreme_hot_weeks.values())[0] == a_per_hot
assert list(epw.typical_weeks.values())[0] == a_per_typ
    # Test when someone sets an analysis_period that is not a full week.
a_per_wrong = AnalysisPeriod(1, 1, 0, 1, 6, 23)
with pytest.raises(Exception):
epw.extreme_cold_weeks = {'Extreme Cold Week': a_per_wrong}
# Test when someone sets the wrong type of data
with pytest.raises(Exception):
epw.extreme_cold_weeks = a_per_cold
def test_import_ground_temperatures():
"""Test the functions that import ground temperature."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
assert len(epw.monthly_ground_temperature.keys()) == 3
assert tuple(epw.monthly_ground_temperature.keys()) == (0.5, 2.0, 4.0)
assert isinstance(epw.monthly_ground_temperature[0.5], MonthlyCollection)
assert epw.monthly_ground_temperature[0.5].values == \
(-1.89, -3.06, -0.99, 2.23, 10.68, 17.2,
21.6, 22.94, 20.66, 15.6, 8.83, 2.56)
assert epw.monthly_ground_temperature[2].values == \
(2.39, 0.31, 0.74, 2.45, 8.1, 13.21,
17.3, 19.5, 19.03, 16.16, 11.5, 6.56)
assert epw.monthly_ground_temperature[4].values == \
(5.93, 3.8, 3.34, 3.98, 7.18, 10.62,
13.78, 15.98, 16.49, 15.25, 12.51, 9.17)
def test_set_ground_temperatures():
"""Test the functions that set ground temperature."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
grnd_dict = dict(epw.monthly_ground_temperature)
grnd_dict[0.5].values = list(range(12))
epw.monthly_ground_temperature = grnd_dict
assert epw.monthly_ground_temperature[0.5].values == tuple(range(12))
# test when the type is not a monthly collection.
grnd_dict = dict(epw.monthly_ground_temperature)
grnd_dict[0.5] = list(range(12))
with pytest.raises(Exception):
epw.monthly_ground_temperature = grnd_dict
def test_epw_header():
"""Check that the process of parsing the EPW header hasn't changed it."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
with open(relative_path, 'r') as epwin:
header_lines = [epwin.readline() for i in range(8)]
for i in range(len(epw.header)):
line1, line2 = epw.header[i], header_lines[i]
if i in (0, 1, 4, 5, 6, 7):
# These lines should match exactly
assert line1.rstrip() == line2.rstrip()
elif i in (2, 3):
# The order of data in these lines can change and spaces can get deleted
assert len(line1.split(',')) == len(line2.split(','))
def test_write_epw():
"""Test save epw_rel."""
path = './tests/assets/epw/tokyo.epw'
epw = EPW(path)
modified_path = './tests/assets/epw/tokyo_modified.epw'
epw.write(modified_path)
assert os.path.isfile(modified_path)
assert os.stat(modified_path).st_size > 1
os.remove(modified_path)
def test_write_epw_from_missing_values():
"""Test import custom epw with wrong types."""
epw = EPW.from_missing_values()
file_path = './tests/assets/epw/missing.epw'
epw.write(file_path)
assert os.path.isfile(file_path)
assert os.stat(file_path).st_size > 1
os.remove(file_path)
def test_write_converted_epw():
"""Test that the saved EPW always has SI units."""
relative_path = './tests/assets/epw/chicago.epw'
epw = EPW(relative_path)
epw.convert_to_ip()
modified_path = './tests/assets/epw/chicago_modified.epw'
epw.write(modified_path)
assert epw.dry_bulb_temperature.header.unit == 'F'
assert epw.dry_bulb_temperature.values[0] == pytest.approx(21.02, rel=1e-2)
new_epw = EPW(modified_path)
assert new_epw.dry_bulb_temperature.header.unit == 'C'
assert new_epw.dry_bulb_temperature.values[0] == pytest.approx(-6.1, rel=1e-5)
os.remove(modified_path)
def test_to_ddy():
"""Test to_ddy."""
path = './tests/assets/epw/chicago.epw'
epw = EPW(path)
ddy_path = './tests/assets/epw/chicago_epw.ddy'
epw.to_ddy(ddy_path)
assert os.path.isfile(ddy_path)
assert os.stat(ddy_path).st_size > 1
os.remove(ddy_path)
ddy_path = './tests/assets/epw/chicago_epw_02.ddy'
epw.to_ddy(ddy_path, 2)
assert os.path.isfile(ddy_path)
assert os.stat(ddy_path).st_size > 1
os.remove(ddy_path)
def test_to_wea():
"""Test to_wea."""
path = './tests/assets/epw/chicago.epw'
epw = EPW(path)
wea_path = './tests/assets/wea/chicago_epw.wea'
epw.to_wea(wea_path)
assert os.path.isfile(wea_path)
assert os.stat(wea_path).st_size > 1
# check the order of the data in the file
    with open(wea_path) as wea_f:
        lines = wea_f.readlines()
    assert float(lines[6].split(' ')[-2]) == epw.direct_normal_radiation[0]
    assert float(lines[6].split(' ')[-1]) == epw.diffuse_horizontal_radiation[0]
    assert float(lines[17].split(' ')[-2]) == epw.direct_normal_radiation[11]
    assert float(lines[17].split(' ')[-1]) == epw.diffuse_horizontal_radiation[11]
os.remove(wea_path)
|
ladybug-analysis-tools/ladybug
|
tests/epw_test.py
|
Python
|
gpl-3.0
| 16,256
|
[
"EPW"
] |
19ae1f28bc1efc5e525a1c6ec157347cb0b1d7e4a23c97594b96ca7961e890f1
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
******************************************************
**espresso.interaction.StillingerWeberPairTermCapped**
******************************************************
"""
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_StillingerWeberPairTermCapped, \
interaction_VerletListStillingerWeberPairTermCapped, \
interaction_VerletListAdressStillingerWeberPairTermCapped, \
interaction_VerletListHadressStillingerWeberPairTermCapped, \
interaction_CellListStillingerWeberPairTermCapped, \
interaction_FixedPairListStillingerWeberPairTermCapped
class StillingerWeberPairTermCappedLocal(PotentialLocal, interaction_StillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber pair potential.'
    def __init__(self, A, B, p, q, epsilon=1.0, sigma=1.0, cutoff=infinity, caprad=0.0):
        """Initialize the local capped Stillinger-Weber pair potential object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_StillingerWeberPairTermCapped, A, B, p, q, epsilon, sigma, cutoff, caprad)
class VerletListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_VerletListStillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber interaction using Verlet lists.'
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListStillingerWeberPairTermCapped, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletListLocal(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
def getCaprad(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getCaprad(self)
class VerletListAdressStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_VerletListAdressStillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber interaction using AdResS Verlet lists.'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressStillingerWeberPairTermCapped, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_VerletListHadressStillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber interaction using H-AdResS Verlet lists.'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressStillingerWeberPairTermCapped, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class CellListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_CellListStillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber interaction using cell lists.'
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListStillingerWeberPairTermCapped, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_FixedPairListStillingerWeberPairTermCapped):
    'The (local) capped Stillinger-Weber interaction using FixedPair lists.'
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListStillingerWeberPairTermCapped, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
if pmi.isController:
class StillingerWeberPairTermCapped(Potential):
        'The capped Stillinger-Weber pair potential.'
pmiproxydefs = dict(
cls = 'espresso.interaction.StillingerWeberPairTermCappedLocal',
pmiproperty = ['A', 'B', 'p', 'q', 'epsilon', 'sigma', 'caprad'],
pmiinvoke = ['getCaprad']
)
class VerletListStillingerWeberPairTermCapped(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListStillingerWeberPairTermCappedLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
class VerletListAdressStillingerWeberPairTermCapped(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListAdressStillingerWeberPairTermCappedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressStillingerWeberPairTermCapped(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListHadressStillingerWeberPairTermCappedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class CellListStillingerWeberPairTermCapped(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.CellListStillingerWeberPairTermCappedLocal',
pmicall = ['setPotential']
)
class FixedPairListStillingerWeberPairTermCapped(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.FixedPairListStillingerWeberPairTermCappedLocal',
pmicall = ['setPotential']
)
|
BackupTheBerlios/espressopp
|
src/interaction/StillingerWeberPairTermCapped.py
|
Python
|
gpl-3.0
| 8,215
|
[
"ESPResSo"
] |
981007c352dfce3b583c201ccdd20300780fc8fbffc69eaa0809db5a476fa705
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import functools
import glob
import itertools
import jinja2
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import sys
import time
import urllib2
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.addons.base.ir.ir_qweb import AssetsBundle, QWebTemplateNotFound
from openerp.tools.translate import _
from openerp import http
from openerp.http import request, serialize_exception as _serialize_exception
_logger = logging.getLogger(__name__)
if hasattr(sys, 'frozen'):
    # When running on a compiled Windows binary, we don't have access to the package loader.
path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'views'))
loader = jinja2.FileSystemLoader(path)
else:
loader = jinja2.PackageLoader('openerp.addons.web', "views")
env = jinja2.Environment(loader=loader, autoescape=True)
env.filters["json"] = simplejson.dumps
# 1 week cache for asset bundles as advised by Google Page Speed
BUNDLE_MAXAGE = 60 * 60 * 24 * 7
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception, e:
_logger.exception("An exception occured during an http request")
se = _serialize_exception(e)
error = {
'code': 200,
'message': "Odoo Server Error",
'data': se
}
return werkzeug.exceptions.InternalServerError(simplejson.dumps(error))
return wrap
def redirect_with_hash(*args, **kw):
"""
.. deprecated:: 8.0
Use the ``http.redirect_with_hash()`` function instead.
"""
return http.redirect_with_hash(*args, **kw)
def abort_and_redirect(url):
r = request.httprequest
response = werkzeug.utils.redirect(url, 302)
response = r.app.get_response(r, response, explicit_session=False)
werkzeug.exceptions.abort(response)
def ensure_db(redirect='/web/database/selector'):
    # This helper should be used in web client auth="none" routes
    # if those routes need a db to work with.
    # If the heuristic does not find any database, the user is redirected
    # to the db selector or to any url specified by the `redirect` argument.
    # If the db is taken from a query parameter, it is checked against
    # `http.db_filter()` in order to ensure it's legit and thus avoid db
    # forgery that could lead to XSS attacks.
db = request.params.get('db')
# Ensure db is legit
if db and db not in http.db_filter([db]):
db = None
if db and not request.session.db:
        # The user asked for a specific database on a new session.
        # That means the nodb router has been used to find the route.
        # Depending on the modules installed in the database, the rendering of the
        # page may depend on data injected by the database route dispatcher.
        # Thus, we redirect the user to the same page but with the session cookie set.
        # This will force using the database route dispatcher...
r = request.httprequest
url_redirect = r.base_url
if r.query_string:
# Can't use werkzeug.wrappers.BaseRequest.url with encoded hashes:
# https://github.com/amigrave/werkzeug/commit/b4a62433f2f7678c234cdcac6247a869f90a7eb7
url_redirect += '?' + r.query_string
response = werkzeug.utils.redirect(url_redirect, 302)
request.session.db = db
abort_and_redirect(url_redirect)
# if db not provided, use the session one
if not db:
db = request.session.db
# if no database provided and no database in session, use monodb
if not db:
db = db_monodb(request.httprequest)
    # if no db can be found by this point, send the user to the database selector
# the database selector will redirect to database manager if needed
if not db:
werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))
# always switch the session to the computed db
if db != request.session.db:
request.session.logout()
abort_and_redirect(request.httprequest.url)
request.session.db = db
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
            #change: n is not a web module and cannot be resolved; ignore it
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
def module_installed():
    # Candidate modules: the current heuristic is the presence of a /static dir
loadable = http.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = request.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = request.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = http.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
            ids = m.search(cr, 1, domain)
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
    except Exception:
        pass
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_boot(db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in http.addons_manifest:
serverside.append(i)
monodb = db or db_monodb()
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
# raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
if addons is None:
addons = module_boot(db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = http.addons_manifest.get(addon, None)
if not manifest:
continue
        # ensure the addons path does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
if pattern.startswith(('http://', 'https://', '//')):
if include_remotes:
r.append((None, pattern))
else:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(extension, mods=None, db=None, debug=None):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if debug is not None:
_logger.warning("openerp.addons.web.main.manifest_list(): debug parameter is deprecated")
files = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(response, last_modified=None, etag=None, max_age=0):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = max_age
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
def login_and_redirect(db, login, key, redirect_url='/web'):
request.session.authenticate(db, login, key)
return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
return redirect
def login_redirect():
url = '/web/login?'
if request.debug:
url += 'debug&'
return """<html><head><script>
window.location = '%sredirect=' + encodeURIComponent(window.location);
</script></head></html>
""" % (url,)
def load_actions_from_ir_values(key, key2, models, meta):
Values = request.session.model('ir.values')
actions = Values.get(key, key2, models, meta, request.context)
return [(id, name, clean_action(action))
for id, name, action in actions]
def clean_action(action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views and fix_view_modes should go into the js ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
    (the ``ir.actions.act_window`` model), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
    This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
def content_disposition(filename):
filename = filename.encode('utf8')
escaped = urllib2.quote(filename)
browser = request.httprequest.user_agent.browser
version = int((request.httprequest.user_agent.version or '0').split('.')[0])
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return "attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
class Home(http.Controller):
@http.route('/', type='http', auth="none")
def index(self, s_action=None, db=None, **kw):
return http.local_redirect('/web', query=request.params, keep_hash=True)
@http.route('/web', type='http', auth="none")
def web_client(self, s_action=None, **kw):
ensure_db()
if request.session.uid:
if kw.get('redirect'):
return werkzeug.utils.redirect(kw.get('redirect'), 303)
if not request.uid:
request.uid = request.session.uid
menu_data = request.registry['ir.ui.menu'].load_menus(request.cr, request.uid, context=request.context)
return request.render('web.webclient_bootstrap', qcontext={'menu_data': menu_data})
else:
return login_redirect()
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
ensure_db()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
if uid is not False:
return http.redirect_with_hash(redirect)
request.uid = old_uid
values['error'] = "Wrong login/password"
return request.render('web.login', values)
@http.route('/login', type='http', auth="none")
def login(self, db, login, key, redirect="/web", **kw):
return login_and_redirect(db, login, key, redirect_url=redirect)
@http.route([
'/web/js/<xmlid>',
'/web/js/<xmlid>/<version>',
], type='http', auth='public')
def js_bundle(self, xmlid, version=None, **kw):
try:
bundle = AssetsBundle(xmlid)
except QWebTemplateNotFound:
return request.not_found()
response = request.make_response(bundle.js(), [('Content-Type', 'application/javascript')])
return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)
@http.route([
'/web/css/<xmlid>',
'/web/css/<xmlid>/<version>',
], type='http', auth='public')
def css_bundle(self, xmlid, version=None, **kw):
try:
bundle = AssetsBundle(xmlid)
except QWebTemplateNotFound:
return request.not_found()
response = request.make_response(bundle.css(), [('Content-Type', 'text/css')])
return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)
class WebClient(http.Controller):
@http.route('/web/webclient/csslist', type='json', auth="none")
def csslist(self, mods=None):
return manifest_list('css', mods=mods)
@http.route('/web/webclient/jslist', type='json', auth="none")
def jslist(self, mods=None):
return manifest_list('js', mods=mods)
@http.route('/web/webclient/locale/<string:lang>', type='http', auth="none")
def load_locale(self, lang):
magic_file_finding = [lang.replace("_",'-').lower(), lang.split('_')[0]]
addons_path = http.addons_manifest['web']['addons_path']
#load datejs locale
datejs_locale = ""
try:
with open(os.path.join(addons_path, 'web', 'static', 'lib', 'datejs', 'globalization', lang.replace('_', '-') + '.js'), 'r') as f:
datejs_locale = f.read()
except IOError:
pass
#load momentjs locale
momentjs_locale_file = False
momentjs_locale = ""
for code in magic_file_finding:
try:
with open(os.path.join(addons_path, 'web', 'static', 'lib', 'moment', 'locale', code + '.js'), 'r') as f:
momentjs_locale = f.read()
                    #we found a matching locale so we can exit
break
except IOError:
continue
#return the content of the locale
headers = [('Content-Type', 'application/javascript'), ('Cache-Control', 'max-age=%s' % (36000))]
return request.make_response(datejs_locale + "\n"+ momentjs_locale, headers)
@http.route('/web/webclient/qweb', type='http', auth="none")
def qweb(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
def bootstrap_translations(self, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = request.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if http.addons_manifest[addon_name].get('bootstrap'):
addons_path = http.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@http.route('/web/webclient/translations', type='json', auth="none")
def translations(self, mods=None, lang=None):
request.disable_db = False
uid = openerp.SUPERUSER_ID
if mods is None:
m = request.registry.get('ir.module.module')
mods = [x['name'] for x in m.search_read(request.cr, uid,
[('state','=','installed')], ['name'])]
if lang is None:
lang = request.context["lang"]
res_lang = request.registry.get('res.lang')
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = request.registry.get('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read(request.cr, uid, [('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@http.route('/web/webclient/version_info', type='json', auth="none")
def version_info(self):
return openerp.service.common.exp_version()
@http.route('/web/tests', type='http', auth="none")
def index(self, mod=None, **kwargs):
return request.render('web.qunit_suite')
class Proxy(http.Controller):
@http.route('/web/proxy/load', type='json', auth="none")
def load(self, path):
""" Proxies an HTTP request through a JSON request.
It is strongly recommended to not request binary files through this,
as the result will be a binary data blob as well.
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
return Client(request.httprequest.app, BaseResponse).get(path).data
class Database(http.Controller):
@http.route('/web/database/selector', type='http', auth="none")
def selector(self, **kw):
try:
dbs = http.db_list()
if not dbs:
return http.local_redirect('/web/database/manager')
except openerp.exceptions.AccessDenied:
dbs = False
return env.get_template("database_selector.html").render({
'databases': dbs,
'debug': request.debug,
})
@http.route('/web/database/manager', type='http', auth="none")
def manager(self, **kw):
# TODO: migrate the webclient's database manager to server side views
request.session.logout()
return env.get_template("database_manager.html").render({
'modules': simplejson.dumps(module_boot()),
})
@http.route('/web/database/get_list', type='json', auth="none")
def get_list(self):
# TODO change js to avoid calling this method if in monodb mode
try:
return http.db_list()
except openerp.exceptions.AccessDenied:
monodb = db_monodb()
if monodb:
return [monodb]
raise
@http.route('/web/database/create', type='json', auth="none")
def create(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
db_created = request.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
if db_created:
request.session.authenticate(params['db_name'], 'admin', params['create_admin_pwd'])
return db_created
@http.route('/web/database/duplicate', type='json', auth="none")
def duplicate(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return request.session.proxy("db").duplicate_database(*duplicate_attrs)
@http.route('/web/database/drop', type='json', auth="none")
def drop(self, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if request.session.proxy("db").drop(password, db):
return True
else:
return False
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': 'Drop Database'}
except Exception:
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@http.route('/web/database/backup', type='http', auth="none")
def backup(self, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
request.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return request.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename))],
{'fileToken': token}
)
except Exception, e:
return simplejson.dumps([[],[{'error': openerp.tools.ustr(e), 'title': _('Backup Database')}]])
@http.route('/web/database/restore', type='http', auth="none")
def restore(self, db_file, restore_pwd, new_db, mode):
try:
copy = mode == 'copy'
data = base64.b64encode(db_file.read())
request.session.proxy("db").restore(restore_pwd, new_db, data, copy)
return ''
except openerp.exceptions.AccessDenied, e:
raise Exception("AccessDenied")
@http.route('/web/database/change_password', type='json', auth="none")
def change_password(self, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return request.session.proxy("db").change_admin_password(old_password, new_password)
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': _('Change Password')}
except Exception:
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(http.Controller):
def session_info(self):
request.session.ensure_valid()
return {
"session_id": request.session_id,
"uid": request.session.uid,
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"username": request.session.login,
}
@http.route('/web/session/get_session_info', type='json', auth="none")
def get_session_info(self):
request.uid = request.session.uid
request.disable_db = False
return self.session_info()
@http.route('/web/session/authenticate', type='json', auth="none")
def authenticate(self, db, login, password, base_location=None):
request.session.authenticate(db, login, password)
return self.session_info()
@http.route('/web/session/change_password', type='json', auth="user")
def change_password(self, fields):
        old_password, new_password, confirm_password = operator.itemgetter(
            'old_pwd', 'new_password', 'confirm_pwd')(
            dict(map(operator.itemgetter('name', 'value'), fields)))
        if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
            return {'error': _('You cannot leave any password empty.'), 'title': _('Change Password')}
        if new_password != confirm_password:
            return {'error': _('The new password and its confirmation must be identical.'), 'title': _('Change Password')}
try:
if request.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@http.route('/web/session/get_lang_list', type='json', auth="none")
def get_lang_list(self):
try:
return request.session.proxy("db").list_lang() or []
except Exception, e:
return {"error": e, "title": _("Languages")}
@http.route('/web/session/modules', type='json', auth="user")
def modules(self):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed()
@http.route('/web/session/save_session_action', type='json', auth="user")
def save_session_action(self, the_action):
"""
        This method stores an action object in the session and returns an integer
        identifying that action. The method get_session_action() can be used to
        retrieve the action later.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
return request.httpsession.save_action(the_action)
@http.route('/web/session/get_session_action', type='json', auth="user")
def get_session_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the action
        was saved too long ago (this case should be handled in a smart way).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
return request.httpsession.get_action(key)
@http.route('/web/session/check', type='json', auth="user")
def check(self):
request.session.assert_valid()
return None
@http.route('/web/session/destroy', type='json', auth="user")
def destroy(self):
request.session.logout()
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True)
return werkzeug.utils.redirect(redirect, 303)
class Menu(http.Controller):
@http.route('/web/menu/load_needaction', type='json', auth="user")
def load_needaction(self, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return request.session.model('ir.ui.menu').get_needaction_data(menu_ids, request.context)
class DataSet(http.Controller):
@http.route('/web/dataset/search_read', type='json', auth="user")
def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(model, fields, offset, limit, domain, sort)
def do_search_read(self, model, fields=False, offset=0, limit=False, domain=None
, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
:returns: A structure (dict) with two keys: ids (all the ids matching
the (domain, context) pair) and records (paginated records
matching fields selection set)
        :rtype: dict
"""
Model = request.session.model(model)
records = Model.search_read(domain, fields, offset or 0, limit or False, sort or False,
request.context)
if not records:
return {
'length': 0,
'records': []
}
if limit and len(records) == limit:
length = Model.search_count(domain, request.context)
else:
length = len(records) + (offset or 0)
return {
'length': length,
'records': records
}
@http.route('/web/dataset/load', type='json', auth="user")
def load(self, model, id, fields):
m = request.session.model(model)
value = {}
r = m.read([id], False, request.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
def _call_kw(self, model, method, args, kwargs):
        # Temporarily implements the future display_name special field for model#read()
if method in ('read', 'search_read') and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
if method == 'read':
names = dict(request.session.model(model).name_get(args[0], **kwargs))
else:
names = dict(request.session.model(model).name_search('', args[0], **kwargs))
args[1].remove('display_name')
records = getattr(request.session.model(model), method)(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "{0}#{1}".format(model, (record['id']))
return records
if method.startswith('_'):
raise Exception("Access Denied: Underscore prefixed methods cannot be remotely called")
return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
@http.route('/web/dataset/call', type='json', auth="user")
def call(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
@http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
def call_kw(self, model, method, args, kwargs, path=None):
return self._call_kw(model, method, args, kwargs)
@http.route('/web/dataset/call_button', type='json', auth="user")
def call_button(self, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(action)
return False
@http.route('/web/dataset/exec_workflow', type='json', auth="user")
def exec_workflow(self, model, id, signal):
return request.session.exec_workflow(model, id, signal)
@http.route('/web/dataset/resequence', type='json', auth="user")
def resequence(self, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
        The re-sequencing starts at the first record of ``ids``, the sequence
number is incremented by one after each record and starts at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = request.session.model(model)
if not m.fields_get([field]):
return False
        # python 2.6's enumerate() has no start parameter
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
class View(http.Controller):
@http.route('/web/view/add_custom', type='json', auth="user")
def add_custom(self, view_id, arch):
CustomView = request.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return {'result': True}
@http.route('/web/view/undo_custom', type='json', auth="user")
def undo_custom(self, view_id, reset=False):
CustomView = request.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', request.session.uid), ('ref_id' ,'=', view_id)],
0, False, False, request.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, request.context)
else:
CustomView.unlink([vcustom[0]], request.context)
return {'result': True}
return {'result': False}
class TreeView(View):
@http.route('/web/treeview/action', type='json', auth="user")
def action(self, model, id):
return load_actions_from_ir_values(
'action', 'tree_but_open',[(model, id)],
False)
class Binary(http.Controller):
@http.route('/web/binary/image', type='http', auth="public")
def image(self, model, id, field, **kw):
last_update = '__last_update'
Model = request.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = request.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(request.session_id).hexdigest()
retag = hashed_session
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
try:
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], request.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
if not id:
res = Model.default_get([field], request.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], request.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except Exception:
image_data = self.placeholder()
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
except:
pass
return request.make_response(image_data, headers)
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@http.route('/web/binary/saveas', type='http', auth="public")
@serialize_exception
def saveas(self, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read([int(id)], fields, request.context)[0]
else:
res = Model.default_get(fields, request.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return request.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
@http.route('/web/binary/saveas_ajax', type='http', auth="public")
@serialize_exception
def saveas_ajax(self, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = { field: data }
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))],
cookies={'fileToken': token})
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
Model = request.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except Exception:
args = {'error': "Something horrible happened"}
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none")
def company_logo(self, dbname=None):
# TODO add etag, refactor to use /image code for etag
uid = None
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
image_data = self.placeholder('logo.png')
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = str(row[0]).decode('base64')
else:
image_data = self.placeholder('nologo.png')
except Exception:
image_data = self.placeholder('logo.png')
headers = [
('Content-Type', 'image/png'),
('Content-Length', len(image_data)),
]
return request.make_response(image_data, headers)
class Action(http.Controller):
@http.route('/web/action/load', type='json', auth="user")
def load(self, action_id, do_not_eval=False):
Actions = request.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = request.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], request.context)
if base_action:
ctx = {}
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
ctx.update(request.context)
action = request.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(action[0])
return value
@http.route('/web/action/run', type='json', auth="user")
def run(self, action_id):
return_action = request.session.model('ir.actions.server').run(
[action_id], request.context)
if return_action:
return clean_action(return_action)
else:
return False
class Export(http.Controller):
@http.route('/web/export/formats', type='json', auth="user")
def formats(self):
""" Returns all valid export formats
        :returns: for each export format, its identifier, printable name and,
                  if unavailable, the reason why
        :rtype: [dict]
"""
return [
{'tag': 'csv', 'label': 'CSV'},
{'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT required"},
]
def fields_get(self, model):
Model = request.session.model(model)
fields = Model.fields_get(False, request.context)
return fields
@http.route('/web/export/get_fields', type='json', auth="user")
    def get_fields(self, model, prefix='', parent_name='',
import_compat=True, parent_field_type=None,
exclude=None):
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: openerp.tools.ustr(field[1].get('string', '')))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
            id = prefix + (prefix and '/' or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
@http.route('/web/export/namelist', type='json', auth="user")
def namelist(self, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = request.session.model("ir.exports").read([export_id])[0]
export_fields_list = request.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, model, export_fields):
info = {}
fields = self.fields_get(model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
        # * then, group on: the first field of the path (which is the same for
        #   a field and for its subfields) and the length of splitting on the
        #   first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
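        #
        # Illustrative example (hypothetical field names): exporting
        # ['name', 'partner_id', 'partner_id/name', 'partner_id/email']
        # sorts and groups as:
        #   ('name', 1)       -> ['name']                    (plain field)
        #   ('partner_id', 1) -> ['partner_id']              (the field itself)
        #   ('partner_id', 2) -> ['partner_id/email',
        #                         'partner_id/name']         (grafted recursively)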
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
def graft_subfields(self, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(model, export_fields).iteritems())
class ExportFormat(object):
raw_data = False
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
        :param list fields: a list of fields to export
        :param list rows: a list of records to export
:returns:
:rtype: bytes
"""
raise NotImplementedError()
def base(self, data, token):
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
simplejson.loads(data))
Model = request.session.model(model)
ids = ids or Model.search(domain, 0, False, False, request.context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, self.raw_data, context=request.context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return request.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model))),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
@http.route('/web/export/csv', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
                    d = d.replace('\n', ' ').replace('\t', ' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
                if d is False:
                    d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
# Excel needs raw data to correctly handle numbers and date values
raw_data = True
@http.route('/web/export/xls', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
base_style = xlwt.easyxf('align: wrap yes')
date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
cell_style = base_style
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
elif isinstance(cell_value, datetime.datetime):
cell_style = datetime_style
elif isinstance(cell_value, datetime.date):
cell_style = date_style
worksheet.write(row_index + 1, cell_index, cell_value, cell_style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Reports(http.Controller):
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@http.route('/web/report', type='http', auth="user")
@serialize_exception
def index(self, action, token):
action = simplejson.loads(action)
report_srv = request.session.proxy("report")
context = dict(request.context)
context.update(action["context"])
report_data = {}
report_ids = context.get("active_ids", None)
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
request.session.db, request.session.uid, request.session.password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
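        # Poll the report service until generation finishes: report_get()
        # returns a falsy "state" while the report is still being rendered.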
while True:
report_struct = report_srv.report_get(
request.session.db, request.session.uid, request.session.password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action.get('name', 'report')
if 'name' not in action:
reports = request.session.model('ir.actions.report.xml')
res_id = reports.search([('report_name', '=', action['report_name']),],
0, False, False, context)
if len(res_id) > 0:
file_name = reports.read(res_id[0], ['name'], context)['name']
else:
file_name = action['report_name']
file_name = '%s.%s' % (file_name, report_struct['format'])
return request.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
class Apps(http.Controller):
@http.route('/apps/<app>', auth='user')
def get_app_url(self, req, app):
act_window_obj = request.session.model('ir.actions.act_window')
ir_model_data = request.session.model('ir.model.data')
try:
action_id = ir_model_data.get_object_reference('base', 'open_module_tree')[1]
action = act_window_obj.read(action_id, ['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])
action['target'] = 'current'
except ValueError:
action = False
try:
app_id = ir_model_data.get_object_reference('base', 'module_%s' % app)[1]
except ValueError:
app_id = False
if action and app_id:
action['res_id'] = app_id
action['view_mode'] = 'form'
action['views'] = [(False, u'form')]
sakey = Session().save_session_action(action)
debug = '?debug' if req.debug else ''
return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
ahu-odoo/odoo
|
addons/web/controllers/main.py
|
Python
|
agpl-3.0
| 67,498
|
[
"VisIt"
] |
c0bf53c57a48426c954751ddc3350cd813ee6f5edd4bfd21e5a3f35215c3e0b3
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import shutil
import pytest
from unittest import TestCase
import os
from bigdl.orca.data.image.parquet_dataset import ParquetDataset, read_parquet
from bigdl.orca.data.image.utils import DType, FeatureType, SchemaField
import tensorflow as tf
from bigdl.orca.ray import RayContext
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
WIDTH, HEIGHT, NUM_CHANNELS = 224, 224, 3
def images_generator():
dataset_path = os.path.join(resource_path, "cat_dog")
for root, dirs, files in os.walk(os.path.join(dataset_path, "cats")):
for name in files:
image_path = os.path.join(root, name)
yield {"image": image_path, "label": 1, "id": image_path}
for root, dirs, files in os.walk(os.path.join(dataset_path, "dogs")):
for name in files:
image_path = os.path.join(root, name)
yield {"image": image_path, "label": 0, "id": image_path}
images_schema = {
"image": SchemaField(feature_type=FeatureType.IMAGE, dtype=DType.FLOAT32, shape=()),
"label": SchemaField(feature_type=FeatureType.SCALAR, dtype=DType.FLOAT32, shape=()),
"id": SchemaField(feature_type=FeatureType.SCALAR, dtype=DType.STRING, shape=())
}
def parse_data_train(image, label):
image = tf.io.decode_jpeg(image, NUM_CHANNELS)
image = tf.image.resize(image, size=(WIDTH, HEIGHT))
image = tf.reshape(image, [WIDTH, HEIGHT, NUM_CHANNELS])
return image, label
def model_creator(config):
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(224, 224, 3)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(2)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
class TestReadParquet(TestCase):
def test_read_parquet_images_tf_dataset(self):
temp_dir = tempfile.mkdtemp()
try:
ParquetDataset.write("file://" + temp_dir, images_generator(),
images_schema, block_size=4)
path = "file://" + temp_dir
output_types = {"id": tf.string, "image": tf.string, "label": tf.float32}
dataset = read_parquet("tf_dataset", path=path, output_types=output_types)
for dt in dataset.take(1):
print(dt.keys())
num_shards, rank = 3, 1
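            # Re-read the dataset as shard `rank` (0-based) of `num_shards`
            # roughly equal splits; only that shard's records are returned.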
dataset_shard = read_parquet("tf_dataset", path=path, config={"num_shards": num_shards,
"rank": rank},
output_types=output_types)
assert len(list(dataset_shard)) <= len(list(dataset)) // num_shards, \
"len of dataset_shard should be 1/`num_shards` of the whole dataset."
dataloader = read_parquet("dataloader", path=path)
dataloader_shard = read_parquet("dataloader", path=path,
config={"num_shards": num_shards, "rank": rank})
cur_dl = iter(dataloader_shard)
cur_count = 0
while True:
try:
print(next(cur_dl)['label'])
cur_count += 1
except StopIteration:
break
assert cur_count == len(list(dataset_shard))
finally:
shutil.rmtree(temp_dir)
def test_parquet_images_training(self):
from bigdl.orca.learn.tf2 import Estimator
temp_dir = tempfile.mkdtemp()
try:
ParquetDataset.write("file://" + temp_dir, images_generator(), images_schema)
path = "file://" + temp_dir
output_types = {"id": tf.string, "image": tf.string, "label": tf.float32}
output_shapes = {"id": (), "image": (), "label": ()}
def data_creator(config, batch_size):
dataset = read_parquet("tf_dataset", path=path,
output_types=output_types, output_shapes=output_shapes)
dataset = dataset.shuffle(10)
dataset = dataset.map(lambda data_dict:
(data_dict["image"], data_dict["label"]))
dataset = dataset.map(parse_data_train)
dataset = dataset.batch(batch_size)
return dataset
ray_ctx = RayContext.get()
trainer = Estimator.from_keras(model_creator=model_creator)
trainer.fit(data=data_creator,
epochs=1,
batch_size=2)
finally:
shutil.rmtree(temp_dir)
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/data/test_read_parquet_images.py
|
Python
|
apache-2.0
| 5,390
|
[
"ORCA"
] |
be9bd7a45259f7886b42974254d2c50ce7a2daa1ff679c1cbdd91ae2e85164eb
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The Theme Manager manages adding, deleting and modifying of themes.
"""
import os
import zipfile
import shutil
import logging
import re
from xml.etree.ElementTree import ElementTree, XML
from PyQt4 import QtCore, QtGui
from openlp.core.lib import ImageSource, OpenLPToolbar, Registry, Settings, UiStrings, get_text_file_string, \
build_icon, translate, check_item_selected, check_directory_exists, create_thumb, validate_thumb
from openlp.core.lib.theme import ThemeXML, BackgroundType, VerticalType, BackgroundGradientType
from openlp.core.lib.ui import critical_error_message_box, create_widget_action
from openlp.core.theme import Theme
from openlp.core.ui import FileRenameForm, ThemeForm
from openlp.core.utils import AppLocation, delete_file, get_locale_key, get_filesystem_encoding
log = logging.getLogger(__name__)
class ThemeManager(QtGui.QWidget):
"""
    Manages the themes and their ordering.
"""
def __init__(self, parent=None):
"""
Constructor
"""
super(ThemeManager, self).__init__(parent)
Registry().register('theme_manager', self)
Registry().register_function('bootstrap_initialise', self.load_first_time_themes)
Registry().register_function('bootstrap_post_set_up', self._push_themes)
self.settings_section = 'themes'
self.theme_form = ThemeForm(self)
self.file_rename_form = FileRenameForm()
# start with the layout
self.layout = QtGui.QVBoxLayout(self)
self.layout.setSpacing(0)
self.layout.setMargin(0)
self.layout.setObjectName('layout')
self.toolbar = OpenLPToolbar(self)
self.toolbar.setObjectName('toolbar')
self.toolbar.add_toolbar_action('newTheme',
text=UiStrings().NewTheme, icon=':/themes/theme_new.png',
tooltip=translate('OpenLP.ThemeManager', 'Create a new theme.'),
triggers=self.on_add_theme)
self.toolbar.add_toolbar_action('editTheme',
text=translate('OpenLP.ThemeManager', 'Edit Theme'),
icon=':/themes/theme_edit.png',
tooltip=translate('OpenLP.ThemeManager', 'Edit a theme.'),
triggers=self.on_edit_theme)
self.delete_toolbar_action = self.toolbar.add_toolbar_action('delete_theme',
text=translate('OpenLP.ThemeManager', 'Delete Theme'),
icon=':/general/general_delete.png',
tooltip=translate('OpenLP.ThemeManager', 'Delete a theme.'),
triggers=self.on_delete_theme)
self.toolbar.addSeparator()
self.toolbar.add_toolbar_action('importTheme',
text=translate('OpenLP.ThemeManager', 'Import Theme'),
icon=':/general/general_import.png',
tooltip=translate('OpenLP.ThemeManager', 'Import a theme.'),
triggers=self.on_import_theme)
self.toolbar.add_toolbar_action('exportTheme',
text=translate('OpenLP.ThemeManager', 'Export Theme'),
icon=':/general/general_export.png',
tooltip=translate('OpenLP.ThemeManager', 'Export a theme.'),
triggers=self.on_export_theme)
self.layout.addWidget(self.toolbar)
self.theme_widget = QtGui.QWidgetAction(self.toolbar)
self.theme_widget.setObjectName('theme_widget')
# create theme manager list
self.theme_list_widget = QtGui.QListWidget(self)
self.theme_list_widget.setAlternatingRowColors(True)
self.theme_list_widget.setIconSize(QtCore.QSize(88, 50))
self.theme_list_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.theme_list_widget.setObjectName('theme_list_widget')
self.layout.addWidget(self.theme_list_widget)
self.theme_list_widget.customContextMenuRequested.connect(self.context_menu)
# build the context menu
self.menu = QtGui.QMenu()
self.edit_action = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', '&Edit Theme'),
icon=':/themes/theme_edit.png', triggers=self.on_edit_theme)
self.copy_action = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', '&Copy Theme'),
icon=':/themes/theme_edit.png', triggers=self.on_copy_theme)
self.rename_action = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', '&Rename Theme'),
icon=':/themes/theme_edit.png', triggers=self.on_rename_theme)
self.delete_action = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', '&Delete Theme'),
icon=':/general/general_delete.png', triggers=self.on_delete_theme)
self.menu.addSeparator()
self.global_action = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', 'Set As &Global Default'),
icon=':/general/general_export.png',
triggers=self.change_global_from_screen)
self.exportAction = create_widget_action(self.menu,
text=translate('OpenLP.ThemeManager', '&Export Theme'),
icon=':/general/general_export.png', triggers=self.on_export_theme)
# Signals
self.theme_list_widget.doubleClicked.connect(self.change_global_from_screen)
self.theme_list_widget.currentItemChanged.connect(self.check_list_state)
Registry().register_function('theme_update_global', self.change_global_from_tab)
# Variables
self.theme_list = []
self.path = AppLocation.get_section_data_path(self.settings_section)
check_directory_exists(self.path)
self.thumb_path = os.path.join(self.path, 'thumbnails')
check_directory_exists(self.thumb_path)
self.theme_form.path = self.path
self.old_background_image = None
self.bad_v1_name_chars = re.compile(r'[%+\[\]]')
# Last little bits of setting up
self.global_theme = Settings().value(self.settings_section + '/global theme')
def check_list_state(self, item):
"""
        Hide the delete action when the default theme is selected.
"""
if item is None:
return
real_theme_name = item.data(QtCore.Qt.UserRole)
theme_name = item.text()
# If default theme restrict actions
if real_theme_name == theme_name:
self.delete_toolbar_action.setVisible(True)
else:
self.delete_toolbar_action.setVisible(False)
def context_menu(self, point):
"""
Build the Right Click Context menu and set state depending on
the type of theme.
"""
item = self.theme_list_widget.itemAt(point)
if item is None:
return
real_theme_name = item.data(QtCore.Qt.UserRole)
theme_name = str(item.text())
visible = real_theme_name == theme_name
self.delete_action.setVisible(visible)
self.rename_action.setVisible(visible)
self.global_action.setVisible(visible)
self.menu.exec_(self.theme_list_widget.mapToGlobal(point))
def change_global_from_tab(self):
"""
Change the global theme when it is changed through the Themes settings tab
"""
self.global_theme = Settings().value(self.settings_section + '/global theme')
log.debug('change_global_from_tab %s', self.global_theme)
for count in range(0, self.theme_list_widget.count()):
# reset the old name
item = self.theme_list_widget.item(count)
old_name = item.text()
new_name = item.data(QtCore.Qt.UserRole)
if old_name != new_name:
self.theme_list_widget.item(count).setText(new_name)
# Set the new name
if self.global_theme == new_name:
name = translate('OpenLP.ThemeManager', '%s (default)') % new_name
self.theme_list_widget.item(count).setText(name)
self.delete_toolbar_action.setVisible(item not in self.theme_list_widget.selectedItems())
def change_global_from_screen(self, index=-1):
"""
Change the global theme when a theme is double clicked upon in the
Theme Manager list
"""
log.debug('change_global_from_screen %s', index)
selected_row = self.theme_list_widget.currentRow()
for count in range(0, self.theme_list_widget.count()):
item = self.theme_list_widget.item(count)
old_name = item.text()
# reset the old name
if old_name != item.data(QtCore.Qt.UserRole):
self.theme_list_widget.item(count).setText(item.data(QtCore.Qt.UserRole))
# Set the new name
if count == selected_row:
self.global_theme = self.theme_list_widget.item(count).text()
name = translate('OpenLP.ThemeManager', '%s (default)') % self.global_theme
self.theme_list_widget.item(count).setText(name)
Settings().setValue(self.settings_section + '/global theme', self.global_theme)
Registry().execute('theme_update_global')
self._push_themes()
def on_add_theme(self):
"""
Loads a new theme with the default settings and then launches the theme
editing form for the user to make their customisations.
"""
theme = ThemeXML()
theme.set_default_header_footer()
self.theme_form.theme = theme
self.theme_form.exec_()
self.load_themes()
def on_rename_theme(self):
"""
Renames an existing theme to a new name
"""
if self._validate_theme_action(translate('OpenLP.ThemeManager', 'You must select a theme to rename.'),
translate('OpenLP.ThemeManager', 'Rename Confirmation'),
translate('OpenLP.ThemeManager', 'Rename %s theme?'), False, False):
item = self.theme_list_widget.currentItem()
old_theme_name = item.data(QtCore.Qt.UserRole)
self.file_rename_form.file_name_edit.setText(old_theme_name)
if self.file_rename_form.exec_():
new_theme_name = self.file_rename_form.file_name_edit.text()
if old_theme_name == new_theme_name:
return
if self.check_if_theme_exists(new_theme_name):
old_theme_data = self.get_theme_data(old_theme_name)
self.clone_theme_data(old_theme_data, new_theme_name)
self.delete_theme(old_theme_name)
for plugin in self.plugin_manager.plugins:
if plugin.uses_theme(old_theme_name):
plugin.rename_theme(old_theme_name, new_theme_name)
self.renderer.update_theme(new_theme_name, old_theme_name)
self.load_themes()
def on_copy_theme(self):
"""
Copies an existing theme to a new name
"""
item = self.theme_list_widget.currentItem()
old_theme_name = item.data(QtCore.Qt.UserRole)
self.file_rename_form.file_name_edit.setText(translate('OpenLP.ThemeManager',
'Copy of %s', 'Copy of <theme name>') % old_theme_name)
if self.file_rename_form.exec_(True):
new_theme_name = self.file_rename_form.file_name_edit.text()
if self.check_if_theme_exists(new_theme_name):
theme_data = self.get_theme_data(old_theme_name)
self.clone_theme_data(theme_data, new_theme_name)
def clone_theme_data(self, theme_data, new_theme_name):
"""
Takes a theme and makes a new copy of it as well as saving it.
"""
log.debug('clone_theme_data')
save_to = None
save_from = None
if theme_data.background_type == 'image':
save_to = os.path.join(self.path, new_theme_name, os.path.split(str(theme_data.background_filename))[1])
save_from = theme_data.background_filename
theme_data.theme_name = new_theme_name
theme_data.extend_image_filename(self.path)
self.save_theme(theme_data, save_from, save_to)
self.load_themes()
def on_edit_theme(self):
"""
Loads the settings for the theme that is to be edited and launches the
theme editing form so the user can make their changes.
"""
if check_item_selected(self.theme_list_widget,
translate('OpenLP.ThemeManager', 'You must select a theme to edit.')):
item = self.theme_list_widget.currentItem()
theme = self.get_theme_data(item.data(QtCore.Qt.UserRole))
if theme.background_type == 'image':
self.old_background_image = theme.background_filename
self.theme_form.theme = theme
self.theme_form.exec_(True)
self.old_background_image = None
self.renderer.update_theme(theme.theme_name)
self.load_themes()
def on_delete_theme(self):
"""
Delete a theme
"""
if self._validate_theme_action(translate('OpenLP.ThemeManager', 'You must select a theme to delete.'),
translate('OpenLP.ThemeManager', 'Delete Confirmation'),
translate('OpenLP.ThemeManager', 'Delete %s theme?')):
item = self.theme_list_widget.currentItem()
theme = item.text()
row = self.theme_list_widget.row(item)
self.theme_list_widget.takeItem(row)
self.delete_theme(theme)
self.renderer.update_theme(theme, only_delete=True)
            # As we do not reload the themes from disk, push out the change
            # directly so the internal lists are refreshed and the update
            # events are triggered.
self._push_themes()
def delete_theme(self, theme):
"""
Delete a theme.
``theme``
The theme to delete.
"""
self.theme_list.remove(theme)
thumb = '%s.png' % theme
delete_file(os.path.join(self.path, thumb))
delete_file(os.path.join(self.thumb_path, thumb))
try:
encoding = get_filesystem_encoding()
shutil.rmtree(os.path.join(self.path, theme).encode(encoding))
        except (OSError, shutil.Error):
            log.exception('Error deleting theme %s', theme)
def on_export_theme(self):
"""
Export the theme in a zip file
"""
item = self.theme_list_widget.currentItem()
if item is None:
critical_error_message_box(message=translate('OpenLP.ThemeManager', 'You have not selected a theme.'))
return
theme = item.data(QtCore.Qt.UserRole)
path = QtGui.QFileDialog.getExistingDirectory(self,
translate('OpenLP.ThemeManager', 'Save Theme - (%s)') % theme,
Settings().value(self.settings_section + '/last directory export'))
self.application.set_busy_cursor()
if path:
Settings().setValue(self.settings_section + '/last directory export', path)
theme_path = os.path.join(path, theme + '.otz')
theme_zip = None
try:
theme_zip = zipfile.ZipFile(theme_path, 'w')
source = os.path.join(self.path, theme)
for files in os.walk(source):
for name in files[2]:
theme_zip.write(
os.path.join(source, name).encode('utf-8'), os.path.join(theme, name).encode('utf-8')
)
QtGui.QMessageBox.information(self,
translate('OpenLP.ThemeManager', 'Theme Exported'),
translate('OpenLP.ThemeManager', 'Your theme has been successfully exported.'))
except (IOError, OSError):
log.exception('Export Theme Failed')
critical_error_message_box(translate('OpenLP.ThemeManager', 'Theme Export Failed'),
translate('OpenLP.ThemeManager', 'Your theme could not be exported due to an error.'))
finally:
if theme_zip:
theme_zip.close()
self.application.set_normal_cursor()
def on_import_theme(self):
"""
Opens a file dialog to select the theme file(s) to import before attempting to extract OpenLP themes from
those files. This process will load both OpenLP version 1 and version 2 themes.
"""
files = QtGui.QFileDialog.getOpenFileNames(self,
translate('OpenLP.ThemeManager', 'Select Theme Import File'),
Settings().value(self.settings_section + '/last directory import'),
translate('OpenLP.ThemeManager', 'OpenLP Themes (*.theme *.otz)'))
log.info('New Themes %s', str(files))
if not files:
return
self.application.set_busy_cursor()
for file_name in files:
Settings().setValue(self.settings_section + '/last directory import', str(file_name))
self.unzip_theme(file_name, self.path)
self.load_themes()
self.application.set_normal_cursor()
def load_first_time_themes(self):
"""
Imports any themes on start up and makes sure there is at least one theme
"""
self.application.set_busy_cursor()
files = AppLocation.get_files(self.settings_section, '.otz')
for theme_file in files:
theme_file = os.path.join(self.path, theme_file)
self.unzip_theme(theme_file, self.path)
delete_file(theme_file)
files = AppLocation.get_files(self.settings_section, '.png')
# No themes have been found so create one
if not files:
theme = ThemeXML()
theme.theme_name = UiStrings().Default
self._write_theme(theme, None, None)
Settings().setValue(self.settings_section + '/global theme', theme.theme_name)
self.application.set_normal_cursor()
self.load_themes()
def load_themes(self):
"""
Loads the theme lists and triggers updates across the whole system
using direct calls or core functions and events for the plugins.
The plugins will call back in to get the real list if they want it.
"""
log.debug('Load themes from dir')
self.theme_list = []
self.theme_list_widget.clear()
files = AppLocation.get_files(self.settings_section, '.png')
        # Sort the themes by name, respecting locale-specific ordering
files.sort(key=lambda file_name: get_locale_key(str(file_name)))
# now process the file list of png files
for name in files:
            # check that the file is in the theme root directory
theme = os.path.join(self.path, name)
if os.path.exists(theme):
text_name = os.path.splitext(name)[0]
if text_name == self.global_theme:
name = translate('OpenLP.ThemeManager', '%s (default)') % text_name
else:
name = text_name
thumb = os.path.join(self.thumb_path, '%s.png' % text_name)
item_name = QtGui.QListWidgetItem(name)
if validate_thumb(theme, thumb):
icon = build_icon(thumb)
else:
icon = create_thumb(theme, thumb)
item_name.setIcon(icon)
item_name.setData(QtCore.Qt.UserRole, text_name)
self.theme_list_widget.addItem(item_name)
self.theme_list.append(text_name)
self._push_themes()
def _push_themes(self):
"""
Notify listeners that the theme list has been updated
"""
Registry().execute('theme_update_list', self.get_themes())
def get_themes(self):
"""
Return the list of loaded themes
"""
log.debug('get themes')
return self.theme_list
def get_theme_data(self, theme_name):
"""
Returns a theme object from an XML file
``theme_name``
Name of the theme to load from file
"""
log.debug('get theme data for theme %s', theme_name)
xml_file = os.path.join(self.path, str(theme_name), str(theme_name) + '.xml')
xml = get_text_file_string(xml_file)
if not xml:
log.debug('No theme data - using default theme')
return ThemeXML()
else:
            return self._create_theme_from_xml(xml, self.path)
def over_write_message_box(self, theme_name):
"""
Display a warning box to the user that a theme already exists
"""
ret = QtGui.QMessageBox.question(self, translate('OpenLP.ThemeManager', 'Theme Already Exists'),
translate('OpenLP.ThemeManager',
'Theme %s already exists. Do you want to replace it?').replace('%s', theme_name),
QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No),
QtGui.QMessageBox.No)
return ret == QtGui.QMessageBox.Yes
def unzip_theme(self, file_name, directory):
"""
        Unzip the theme, remove the preview file if stored, and generate a new
        preview file. Check the XML theme version and upgrade if necessary.
"""
log.debug('Unzipping theme %s', file_name)
file_name = str(file_name)
theme_zip = None
out_file = None
file_xml = None
abort_import = True
try:
theme_zip = zipfile.ZipFile(file_name)
xml_file = [name for name in theme_zip.namelist() if os.path.splitext(name)[1].lower() == '.xml']
if len(xml_file) != 1:
log.exception('Theme contains "%s" XML files' % len(xml_file))
raise Exception('validation')
xml_tree = ElementTree(element=XML(theme_zip.read(xml_file[0]))).getroot()
v1_background = xml_tree.find('BackgroundType')
if v1_background is not None:
theme_name, file_xml, out_file, abort_import = \
self.unzip_version_122(directory, theme_zip, xml_file[0], xml_tree, v1_background, out_file)
else:
theme_name = xml_tree.find('name').text.strip()
theme_folder = os.path.join(directory, theme_name)
theme_exists = os.path.exists(theme_folder)
if theme_exists and not self.over_write_message_box(theme_name):
abort_import = True
return
else:
abort_import = False
for name in theme_zip.namelist():
name = name.replace('/', os.path.sep)
split_name = name.split(os.path.sep)
if split_name[-1] == '' or len(split_name) == 1:
# is directory or preview file
continue
full_name = os.path.join(directory, name)
check_directory_exists(os.path.dirname(full_name))
if os.path.splitext(name)[1].lower() == '.xml':
file_xml = str(theme_zip.read(name), 'utf-8')
out_file = open(full_name, 'w')
out_file.write(file_xml)
else:
out_file = open(full_name, 'wb')
out_file.write(theme_zip.read(name))
out_file.close()
except (IOError, zipfile.BadZipfile):
log.exception('Importing theme from zip failed %s' % file_name)
raise Exception('validation')
except Exception as info:
if str(info) == 'validation':
critical_error_message_box(translate('OpenLP.ThemeManager',
'Validation Error'), translate('OpenLP.ThemeManager', 'File is not a valid theme.'))
else:
raise
finally:
# Close the files, to be able to continue creating the theme.
if theme_zip:
theme_zip.close()
if out_file:
out_file.close()
if not abort_import:
# As all files are closed, we can create the Theme.
if file_xml:
                theme = self._create_theme_from_xml(file_xml, self.path)
self.generate_and_save_image(directory, theme_name, theme)
# Only show the error message, when IOError was not raised (in
# this case the error message has already been shown).
elif theme_zip is not None:
critical_error_message_box(
translate('OpenLP.ThemeManager', 'Validation Error'),
translate('OpenLP.ThemeManager', 'File is not a valid theme.'))
log.exception('Theme file does not contain XML data %s' % file_name)
def unzip_version_122(self, dir_name, zip_file, xml_file, xml_tree, background, out_file):
"""
        Unzip an openlp.org 1.2x theme file and upgrade the theme xml. When
        calling this method, keep in mind that some parameters are redundant.
"""
theme_name = xml_tree.find('Name').text.strip()
theme_name = self.bad_v1_name_chars.sub('', theme_name)
theme_folder = os.path.join(dir_name, theme_name)
theme_exists = os.path.exists(theme_folder)
if theme_exists and not self.over_write_message_box(theme_name):
return '', '', '', True
themedir = os.path.join(dir_name, theme_name)
check_directory_exists(themedir)
file_xml = str(zip_file.read(xml_file), 'utf-8')
file_xml = self._migrate_version_122(file_xml)
out_file = open(os.path.join(themedir, theme_name + '.xml'), 'w')
out_file.write(file_xml.encode('utf-8'))
out_file.close()
if background.text.strip() == '2':
image_name = xml_tree.find('BackgroundParameter1').text.strip()
# image file has same extension and is in subfolder
image_file = [name for name in zip_file.namelist() if os.path.splitext(name)[1].lower()
== os.path.splitext(image_name)[1].lower() and name.find(r'/')]
if len(image_file) >= 1:
out_file = open(os.path.join(themedir, image_name), 'wb')
out_file.write(zip_file.read(image_file[0]))
out_file.close()
else:
log.exception('Theme file does not contain image file "%s"' % image_name.decode('utf-8', 'replace'))
raise Exception('validation')
return theme_name, file_xml, out_file, False
def check_if_theme_exists(self, theme_name):
"""
Check if theme already exists and displays error message
``theme_name``
Name of the Theme to test
"""
theme_dir = os.path.join(self.path, theme_name)
if os.path.exists(theme_dir):
critical_error_message_box(
translate('OpenLP.ThemeManager', 'Validation Error'),
translate('OpenLP.ThemeManager', 'A theme with this name already exists.'))
return False
return True
def save_theme(self, theme, image_from, image_to):
"""
        Called by the theme maintenance dialog to save the theme
        and to trigger the reload of the theme list
"""
self._write_theme(theme, image_from, image_to)
if theme.background_type == BackgroundType.to_string(BackgroundType.Image):
self.image_manager.update_image_border(theme.background_filename,
ImageSource.Theme, QtGui.QColor(theme.background_border_color))
self.image_manager.process_updates()
def _write_theme(self, theme, image_from, image_to):
"""
Writes the theme to the disk and handles the background image if
necessary
"""
name = theme.theme_name
theme_pretty_xml = theme.extract_formatted_xml()
log.debug('save_theme %s %s', name, theme_pretty_xml.decode('utf-8'))
theme_dir = os.path.join(self.path, name)
check_directory_exists(theme_dir)
theme_file = os.path.join(theme_dir, name + '.xml')
if self.old_background_image and image_to != self.old_background_image:
delete_file(self.old_background_image)
out_file = None
try:
out_file = open(theme_file, 'w')
out_file.write(theme_pretty_xml.decode('UTF-8'))
except IOError:
log.exception('Saving theme to file failed')
finally:
if out_file:
out_file.close()
if image_from and image_from != image_to:
try:
encoding = get_filesystem_encoding()
shutil.copyfile(str(image_from).encode(encoding), str(image_to).encode(encoding))
            except (IOError, shutil.Error):
                log.exception('Failed to save theme image')
self.generate_and_save_image(self.path, name, theme)
def generate_and_save_image(self, directory, name, theme):
"""
Generate and save a preview image
"""
log.debug('generate_and_save_image %s %s', directory, name)
frame = self.generate_image(theme)
sample_path_name = os.path.join(self.path, name + '.png')
if os.path.exists(sample_path_name):
os.unlink(sample_path_name)
frame.save(sample_path_name, 'png')
thumb = os.path.join(self.thumb_path, '%s.png' % name)
create_thumb(sample_path_name, thumb, False)
log.debug('Theme image written to %s', sample_path_name)
def update_preview_images(self):
"""
Called to update the themes' preview images.
"""
log.debug('update_preview_images')
self.main_window.display_progress_bar(len(self.theme_list))
for theme in self.theme_list:
self.main_window.increment_progress_bar()
self.generate_and_save_image(self.path, theme, self.get_theme_data(theme))
self.main_window.finished_progress_bar()
self.load_themes()
def generate_image(self, theme_data, forcePage=False):
"""
Call the renderer to build a Sample Image
``theme_data``
            The theme to generate a preview for.
``forcePage``
            Flag to indicate that the lines-per-page information needs to be generated.
"""
log.debug('generate_image \n%s ', theme_data)
return self.renderer.generate_preview(theme_data, forcePage)
def get_preview_image(self, theme):
"""
Return an image representing the look of the theme
``theme``
The theme to return the image for
"""
log.debug('get_preview_image %s ', theme)
image = os.path.join(self.path, theme + '.png')
return image
    def _create_theme_from_xml(self, theme_xml, path):
"""
Return a theme object using information parsed from XML
``theme_xml``
The XML data to load into the theme
"""
theme = ThemeXML()
theme.parse(theme_xml)
theme.extend_image_filename(path)
return theme
def _validate_theme_action(self, select_text, confirm_title, confirm_text, testPlugin=True, confirm=True):
"""
Check to see if theme has been selected and the destructive action
is allowed.
"""
self.global_theme = Settings().value(self.settings_section + '/global theme')
if check_item_selected(self.theme_list_widget, select_text):
item = self.theme_list_widget.currentItem()
theme = item.text()
# confirm deletion
if confirm:
answer = QtGui.QMessageBox.question(self, confirm_title, confirm_text % theme,
QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No),
QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return False
# should be the same unless default
if theme != item.data(QtCore.Qt.UserRole):
critical_error_message_box(
message=translate('OpenLP.ThemeManager', 'You are unable to delete the default theme.'))
return False
            # check for use elsewhere in the system.
if testPlugin:
for plugin in self.plugin_manager.plugins:
if plugin.uses_theme(theme):
critical_error_message_box(translate('OpenLP.ThemeManager', 'Validation Error'),
translate('OpenLP.ThemeManager', 'Theme %s is used in the %s plugin.') %
(theme, plugin.name))
return False
return True
return False
def _migrate_version_122(self, xml_data):
"""
Convert the xml data from version 1 format to the current format.
New fields are loaded with defaults to provide a complete, working
theme containing all compatible customisations from the old theme.
``xml_data``
Version 1 theme to convert
"""
theme = Theme(xml_data)
new_theme = ThemeXML()
new_theme.theme_name = self.bad_v1_name_chars.sub('', theme.Name)
if theme.BackgroundType == BackgroundType.Solid:
new_theme.background_type = BackgroundType.to_string(BackgroundType.Solid)
new_theme.background_color = str(theme.BackgroundParameter1.name())
elif theme.BackgroundType == BackgroundType.Horizontal:
new_theme.background_type = BackgroundType.to_string(BackgroundType.Gradient)
new_theme.background_direction = BackgroundGradientType.to_string(BackgroundGradientType.Horizontal)
if theme.BackgroundParameter3.name() == 1:
new_theme.background_direction = BackgroundGradientType.to_string(BackgroundGradientType.Horizontal)
new_theme.background_start_color = str(theme.BackgroundParameter1.name())
new_theme.background_end_color = str(theme.BackgroundParameter2.name())
elif theme.BackgroundType == BackgroundType.Image:
new_theme.background_type = BackgroundType.to_string(BackgroundType.Image)
new_theme.background_filename = str(theme.BackgroundParameter1)
elif theme.BackgroundType == BackgroundType.Transparent:
new_theme.background_type = BackgroundType.to_string(BackgroundType.Transparent)
new_theme.font_main_name = theme.FontName
new_theme.font_main_color = str(theme.FontColor.name())
new_theme.font_main_size = theme.FontProportion * 3
new_theme.font_footer_name = theme.FontName
new_theme.font_footer_color = str(theme.FontColor.name())
new_theme.font_main_shadow = False
if theme.Shadow == 1:
new_theme.font_main_shadow = True
new_theme.font_main_shadow_color = str(theme.ShadowColor.name())
if theme.Outline == 1:
new_theme.font_main_outline = True
new_theme.font_main_outline_color = str(theme.OutlineColor.name())
vAlignCorrection = VerticalType.Top
if theme.VerticalAlign == 2:
vAlignCorrection = VerticalType.Middle
elif theme.VerticalAlign == 1:
vAlignCorrection = VerticalType.Bottom
new_theme.display_horizontal_align = theme.HorizontalAlign
new_theme.display_vertical_align = vAlignCorrection
return new_theme.extract_xml()
def _get_renderer(self):
"""
Adds the Renderer to the class dynamically
"""
if not hasattr(self, '_renderer'):
self._renderer = Registry().get('renderer')
return self._renderer
renderer = property(_get_renderer)
def _get_image_manager(self):
"""
Adds the image manager to the class dynamically
"""
if not hasattr(self, '_image_manager'):
self._image_manager = Registry().get('image_manager')
return self._image_manager
image_manager = property(_get_image_manager)
def _get_plugin_manager(self):
"""
        Adds the plugin manager to the class dynamically
"""
if not hasattr(self, '_plugin_manager'):
self._plugin_manager = Registry().get('plugin_manager')
return self._plugin_manager
plugin_manager = property(_get_plugin_manager)
def _get_main_window(self):
"""
Adds the main window to the class dynamically
"""
if not hasattr(self, '_main_window'):
self._main_window = Registry().get('main_window')
return self._main_window
main_window = property(_get_main_window)
def _get_application(self):
"""
        Adds the OpenLP application object to the class dynamically.
        Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
|
marmyshev/item_title
|
openlp/core/ui/thememanager.py
|
Python
|
gpl-2.0
| 39,562
|
[
"Brian"
] |
decde9864c897da8741200ddc0aad67441278c7304b8181e3549e54fae25b371
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MyPy test runner script."""
from __future__ import annotations
import argparse
import os
import site
import subprocess
import sys
from scripts import common
from scripts import install_third_party_libs
# List of directories whose files won't be type-annotated ever.
EXCLUDED_DIRECTORIES = [
'proto_files/',
'scripts/linters/test_files/',
'third_party/',
'venv/'
]
# List of files who should be type-annotated but are not.
NOT_FULLY_COVERED_FILES = [
'core/controllers/',
'core/domain/action_registry.py',
'core/domain/action_registry_test.py',
'core/domain/activity_jobs_one_off.py',
'core/domain/activity_jobs_one_off_test.py',
'core/domain/activity_services.py',
'core/domain/activity_services_test.py',
'core/domain/activity_validators.py',
'core/domain/activity_validators_test.py',
'core/domain/app_feedback_report_validators.py',
'core/domain/app_feedback_report_validators_test.py',
'core/domain/audit_validators.py',
'core/domain/audit_validators_test.py',
'core/domain/auth_jobs_one_off.py',
'core/domain/auth_jobs_one_off_test.py',
'core/domain/auth_services.py',
'core/domain/auth_services_test.py',
'core/domain/auth_validators.py',
'core/domain/auth_validators_test.py',
'core/domain/base_model_validators.py',
'core/domain/base_model_validators_test.py',
'core/domain/beam_job_validators.py',
'core/domain/beam_job_validators_test.py',
'core/domain/blog_services.py',
'core/domain/blog_services_test.py',
'core/domain/blog_validators.py',
'core/domain/blog_validators_test.py',
'core/domain/caching_services.py',
'core/domain/caching_services_test.py',
'core/domain/calculation_registry.py',
'core/domain/calculation_registry_test.py',
'core/domain/change_domain.py',
'core/domain/classifier_services.py',
'core/domain/classifier_services_test.py',
'core/domain/classifier_validators.py',
'core/domain/classifier_validators_test.py',
'core/domain/classroom_services.py',
'core/domain/classroom_services_test.py',
'core/domain/collection_domain.py',
'core/domain/collection_domain_test.py',
'core/domain/collection_jobs_one_off.py',
'core/domain/collection_jobs_one_off_test.py',
'core/domain/collection_services.py',
'core/domain/collection_services_test.py',
'core/domain/collection_validators.py',
'core/domain/collection_validators_test.py',
'core/domain/config_domain.py',
'core/domain/config_domain_test.py',
'core/domain/config_services.py',
'core/domain/config_services_test.py',
'core/domain/config_validators.py',
'core/domain/config_validators_test.py',
'core/domain/cron_services.py',
'core/domain/customization_args_util.py',
'core/domain/customization_args_util_test.py',
'core/domain/draft_upgrade_services.py',
'core/domain/draft_upgrade_services_test.py',
'core/domain/email_jobs_one_off.py',
'core/domain/email_jobs_one_off_test.py',
'core/domain/email_manager.py',
'core/domain/email_manager_test.py',
'core/domain/email_services.py',
'core/domain/email_services_test.py',
'core/domain/email_subscription_services.py',
'core/domain/email_subscription_services_test.py',
'core/domain/email_validators.py',
'core/domain/email_validators_test.py',
'core/domain/event_services.py',
'core/domain/event_services_test.py',
'core/domain/exp_domain.py',
'core/domain/exp_domain_test.py',
'core/domain/exp_fetchers.py',
'core/domain/exp_fetchers_test.py',
'core/domain/exp_jobs_one_off.py',
'core/domain/exp_jobs_one_off_test.py',
'core/domain/exp_services.py',
'core/domain/exp_services_test.py',
'core/domain/exploration_validators.py',
'core/domain/exploration_validators_test.py',
'core/domain/expression_parser.py',
'core/domain/expression_parser_test.py',
'core/domain/feedback_jobs_one_off.py',
'core/domain/feedback_jobs_one_off_test.py',
'core/domain/feedback_services.py',
'core/domain/feedback_services_test.py',
'core/domain/feedback_validators.py',
'core/domain/feedback_validators_test.py',
'core/domain/fs_domain.py',
'core/domain/fs_domain_test.py',
'core/domain/fs_services.py',
'core/domain/fs_services_test.py',
'core/domain/html_cleaner.py',
'core/domain/html_cleaner_test.py',
'core/domain/html_validation_service.py',
'core/domain/html_validation_service_test.py',
'core/domain/image_services.py',
'core/domain/image_services_test.py',
'core/domain/image_validation_services.py',
'core/domain/image_validation_services_test.py',
'core/domain/improvements_services.py',
'core/domain/improvements_services_test.py',
'core/domain/improvements_validators.py',
'core/domain/improvements_validators_test.py',
'core/domain/interaction_jobs_one_off.py',
'core/domain/interaction_jobs_one_off_test.py',
'core/domain/interaction_registry.py',
'core/domain/interaction_registry_test.py',
'core/domain/job_validators.py',
'core/domain/job_validators_test.py',
'core/domain/learner_goals_services.py',
'core/domain/learner_goals_services_test.py',
'core/domain/learner_playlist_services.py',
'core/domain/learner_playlist_services_test.py',
'core/domain/learner_progress_services.py',
'core/domain/learner_progress_services_test.py',
'core/domain/moderator_services.py',
'core/domain/moderator_services_test.py',
'core/domain/object_registry.py',
'core/domain/object_registry_test.py',
'core/domain/opportunity_jobs_one_off.py',
'core/domain/opportunity_jobs_one_off_test.py',
'core/domain/opportunity_services.py',
'core/domain/opportunity_services_test.py',
'core/domain/opportunity_validators.py',
'core/domain/opportunity_validators_test.py',
'core/domain/param_domain.py',
'core/domain/param_domain_test.py',
'core/domain/platform_feature_services.py',
'core/domain/platform_feature_services_test.py',
'core/domain/platform_parameter_domain.py',
'core/domain/platform_parameter_domain_test.py',
'core/domain/platform_parameter_list.py',
'core/domain/platform_parameter_list_test.py',
'core/domain/platform_parameter_registry.py',
'core/domain/platform_parameter_registry_test.py',
'core/domain/playthrough_issue_registry.py',
'core/domain/playthrough_issue_registry_test.py',
'core/domain/prod_validation_jobs_one_off.py',
'core/domain/question_domain.py',
'core/domain/question_domain_test.py',
'core/domain/question_fetchers.py',
'core/domain/question_fetchers_test.py',
'core/domain/question_jobs_one_off.py',
'core/domain/question_jobs_one_off_test.py',
'core/domain/question_services.py',
'core/domain/question_services_test.py',
'core/domain/question_validators.py',
'core/domain/question_validators_test.py',
'core/domain/rating_services.py',
'core/domain/rating_services_test.py',
'core/domain/recommendations_jobs_one_off.py',
'core/domain/recommendations_jobs_one_off_test.py',
'core/domain/recommendations_services.py',
'core/domain/recommendations_services_test.py',
'core/domain/recommendations_validators.py',
'core/domain/recommendations_validators_test.py',
'core/domain/rights_manager.py',
'core/domain/rights_manager_test.py',
'core/domain/role_services.py',
'core/domain/role_services_test.py',
'core/domain/rte_component_registry.py',
'core/domain/rte_component_registry_test.py',
'core/domain/rules_registry.py',
'core/domain/rules_registry_test.py',
'core/domain/search_services.py',
'core/domain/search_services_test.py',
'core/domain/skill_domain.py',
'core/domain/skill_domain_test.py',
'core/domain/skill_fetchers.py',
'core/domain/skill_fetchers_test.py',
'core/domain/skill_jobs_one_off.py',
'core/domain/skill_jobs_one_off_test.py',
'core/domain/skill_services.py',
'core/domain/skill_services_test.py',
'core/domain/skill_validators.py',
'core/domain/skill_validators_test.py',
'core/domain/state_domain.py',
'core/domain/state_domain_test.py',
'core/domain/statistics_validators.py',
'core/domain/statistics_validators_test.py',
'core/domain/stats_domain.py',
'core/domain/stats_domain_test.py',
'core/domain/stats_jobs_continuous.py',
'core/domain/stats_jobs_continuous_test.py',
'core/domain/stats_jobs_one_off.py',
'core/domain/stats_jobs_one_off_test.py',
'core/domain/stats_services.py',
'core/domain/stats_services_test.py',
'core/domain/storage_model_audit_jobs_test.py',
'core/domain/story_domain.py',
'core/domain/story_domain_test.py',
'core/domain/story_fetchers.py',
'core/domain/story_fetchers_test.py',
'core/domain/story_jobs_one_off.py',
'core/domain/story_jobs_one_off_test.py',
'core/domain/story_services.py',
'core/domain/story_services_test.py',
'core/domain/story_validators.py',
'core/domain/story_validators_test.py',
'core/domain/subscription_services.py',
'core/domain/subscription_services_test.py',
'core/domain/subtopic_page_domain.py',
'core/domain/subtopic_page_domain_test.py',
'core/domain/subtopic_page_services.py',
'core/domain/subtopic_page_services_test.py',
'core/domain/subtopic_validators.py',
'core/domain/subtopic_validators_test.py',
'core/domain/suggestion_jobs_one_off.py',
'core/domain/suggestion_jobs_one_off_test.py',
'core/domain/suggestion_registry.py',
'core/domain/suggestion_registry_test.py',
'core/domain/suggestion_services.py',
'core/domain/suggestion_services_test.py',
'core/domain/suggestion_validators.py',
'core/domain/suggestion_validators_test.py',
'core/domain/summary_services.py',
'core/domain/summary_services_test.py',
'core/domain/takeout_service.py',
'core/domain/takeout_service_test.py',
'core/domain/taskqueue_services.py',
'core/domain/taskqueue_services_test.py',
'core/domain/topic_domain.py',
'core/domain/topic_domain_test.py',
'core/domain/topic_fetchers.py',
'core/domain/topic_fetchers_test.py',
'core/domain/topic_jobs_one_off.py',
'core/domain/topic_jobs_one_off_test.py',
'core/domain/topic_services.py',
'core/domain/topic_services_test.py',
'core/domain/topic_validators.py',
'core/domain/topic_validators_test.py',
'core/domain/translatable_object_registry.py',
'core/domain/translatable_object_registry_test.py',
'core/domain/translation_fetchers.py',
'core/domain/translation_fetchers_test.py',
'core/domain/translation_services.py',
'core/domain/translation_services_test.py',
'core/domain/translation_validators.py',
'core/domain/translation_validators_test.py',
'core/domain/user_domain.py',
'core/domain/user_domain_test.py',
'core/domain/user_jobs_one_off.py',
'core/domain/user_jobs_one_off_test.py',
'core/domain/user_query_domain.py',
'core/domain/user_query_domain_test.py',
'core/domain/user_query_jobs_one_off.py',
'core/domain/user_query_jobs_one_off_test.py',
'core/domain/user_query_services.py',
'core/domain/user_query_services_test.py',
'core/domain/user_services.py',
'core/domain/user_services_test.py',
'core/domain/user_validators.py',
'core/domain/user_validators_test.py',
'core/domain/visualization_registry.py',
'core/domain/visualization_registry_test.py',
'core/domain/voiceover_services.py',
'core/domain/voiceover_services_test.py',
'core/domain/wipeout_jobs_one_off.py',
'core/domain/wipeout_jobs_one_off_test.py',
'core/domain/wipeout_service.py',
'core/domain/wipeout_service_test.py',
'core/platform/storage/cloud_storage_emulator.py',
'core/platform/storage/cloud_storage_emulator_test.py',
'core/platform_feature_list.py',
'core/platform_feature_list_test.py',
'core/storage/beam_job/gae_models.py',
'core/storage/beam_job/gae_models_test.py',
'core/storage/blog/gae_models.py',
'core/storage/blog/gae_models_test.py',
'core/storage/storage_models_test.py',
'core/tests/build_sources/extensions/CodeRepl.py',
'core/tests/build_sources/extensions/DragAndDropSortInput.py',
'core/tests/build_sources/extensions/base.py',
'core/tests/build_sources/extensions/base_test.py',
'core/tests/build_sources/extensions/models_test.py',
'core/tests/data/failing_tests.py',
'core/tests/data/image_constants.py',
'core/tests/data/unicode_and_str_handler.py',
'core/tests/gae_suite.py',
'core/tests/gae_suite_test.py',
'core/tests/load_tests/feedback_thread_summaries_test.py',
'core/tests/test_utils.py',
'core/tests/test_utils_test.py',
'core/jobs',
'core/python_utils.py',
'core/python_utils_test.py',
'extensions/',
'scripts/'
]
CONFIG_FILE_PATH = os.path.join('.', 'mypy.ini')
MYPY_REQUIREMENTS_FILE_PATH = os.path.join('.', 'mypy_requirements.txt')
MYPY_TOOLS_DIR = os.path.join(os.getcwd(), 'third_party', 'python3_libs')
PYTHON3_CMD = 'python3'
_PATHS_TO_INSERT = [MYPY_TOOLS_DIR, ]
_PARSER = argparse.ArgumentParser(
description='Python type checking using mypy script.'
)
_PARSER.add_argument(
'--skip-install',
help='If passed, skips installing dependencies.'
' By default, they are installed.',
action='store_true')
_PARSER.add_argument(
'--install-globally',
help='optional; if specified, installs mypy and its requirements globally.'
' By default, they are installed to %s' % MYPY_TOOLS_DIR,
action='store_true')
_PARSER.add_argument(
'--files',
help='Files to type-check',
action='store',
nargs='+'
)
def install_third_party_libraries(skip_install: bool) -> None:
"""Run the installation script.
Args:
skip_install: bool. Whether to skip running the installation script.
"""
if not skip_install:
install_third_party_libs.main()
def get_mypy_cmd(files, mypy_exec_path, using_global_mypy):
"""Return the appropriate command to be run.
Args:
        files: list(str). List of file paths to type-check, or None to
            check all files except the excluded ones.
mypy_exec_path: str. Path of mypy executable.
using_global_mypy: bool. Whether generated command should run using
global mypy.
Returns:
list(str). List of command line arguments.
"""
if using_global_mypy:
mypy_cmd = 'mypy'
else:
mypy_cmd = mypy_exec_path
if files:
cmd = [mypy_cmd, '--config-file', CONFIG_FILE_PATH] + files
else:
excluded_files_regex = (
'|'.join(NOT_FULLY_COVERED_FILES + EXCLUDED_DIRECTORIES))
cmd = [
mypy_cmd, '--exclude', excluded_files_regex,
'--config-file', CONFIG_FILE_PATH, '.'
]
return cmd
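# Illustrative sketch (not part of the original script) of the commands
# get_mypy_cmd builds; the file path below is hypothetical and the
# './mypy.ini' form assumes a POSIX os.path.join:
#   get_mypy_cmd(['core/utils.py'], '/tools/mypy', False)
#   # -> ['/tools/mypy', '--config-file', './mypy.ini', 'core/utils.py']
#   get_mypy_cmd(None, '/tools/mypy', True)
#   # -> ['mypy', '--exclude', '<regex of excluded paths>',
#   #     '--config-file', './mypy.ini', '.']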
def install_mypy_prerequisites(install_globally):
"""Install mypy and type stubs from mypy_requirements.txt.
Args:
install_globally: bool. Whether mypy and its requirements are to be
installed globally.
Returns:
tuple(int, str). The return code from installing prerequisites and the
path of the mypy executable.
"""
    # TODO(#13398): Change MyPy installation after the Python 3 migration.
    # For now, we install packages globally in CI because pip installation
    # there does not behave the way we expect.
if install_globally:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH
]
else:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH, '--target', MYPY_TOOLS_DIR,
'--upgrade'
]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.communicate()
    if b'can\'t combine user with prefix' in output[1]:
        extension_args = ['--user', '--prefix=', '--system']
        new_process = subprocess.Popen(
            cmd + extension_args, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        new_process.communicate()
        _PATHS_TO_INSERT.append(os.path.join(site.USER_BASE, 'bin'))
        mypy_exec_path = os.path.join(site.USER_BASE, 'bin', 'mypy')
        return (new_process.returncode, mypy_exec_path)
else:
_PATHS_TO_INSERT.append(os.path.join(MYPY_TOOLS_DIR, 'bin'))
mypy_exec_path = os.path.join(MYPY_TOOLS_DIR, 'bin', 'mypy')
return (process.returncode, mypy_exec_path)
def main(args=None):
"""Runs the MyPy type checks."""
parsed_args = _PARSER.parse_args(args=args)
for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
# The directories should only be inserted starting at index 1. See
# https://stackoverflow.com/a/10095099 and
# https://stackoverflow.com/q/10095037 for more details.
sys.path.insert(1, directory)
install_third_party_libraries(parsed_args.skip_install)
common.fix_third_party_imports()
print('Installing Mypy and stubs for third party libraries.')
return_code, mypy_exec_path = install_mypy_prerequisites(
parsed_args.install_globally)
if return_code != 0:
print('Cannot install Mypy and stubs for third party libraries.')
sys.exit(1)
print('Installed Mypy and stubs for third party libraries.')
print('Starting Mypy type checks.')
cmd = get_mypy_cmd(
parsed_args.files, mypy_exec_path, parsed_args.install_globally)
env = os.environ.copy()
for path in _PATHS_TO_INSERT:
env['PATH'] = '%s%s' % (path, os.pathsep) + env['PATH']
env['PYTHONPATH'] = MYPY_TOOLS_DIR
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
    # Standard output and error output are in bytes; we need to decode
    # them before printing.
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
if process.returncode == 0:
print('Mypy type checks successful.')
else:
print(
'Mypy type checks unsuccessful. Please fix the errors. '
'For more information, visit: '
'https://github.com/oppia/oppia/wiki/Backend-Type-Annotations')
sys.exit(2)
return process.returncode
if __name__ == '__main__': # pragma: no cover
main()
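# Hedged usage sketch (not part of the original script); assumes invocation
# from the repository root:
#   python -m scripts.run_mypy_checks --skip-install --files core/utils.py
# Without --files, every file not matched by the exclusion regex is checked.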
|
brianrodri/oppia
|
scripts/run_mypy_checks.py
|
Python
|
apache-2.0
| 19,119
|
[
"VisIt"
] |
33c0b49c2556fffc0dbd4f0823ab9652d969b4887101b5712c156bc16e67265f
|
# Licensed under the MIT License - see LICENSE.rst
"""
Methods for fitting transit light curves, spot occultations, or both, using
`scipy` minimizers and `emcee`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import emcee
from scipy import optimize, signal
import matplotlib.pyplot as plt
import batman
from copy import deepcopy
from emcee.utils import MPIPool
import sys
def gaussian(times, amplitude, t0, sigma):
"""
Gaussian function.
Parameters
----------
times : `numpy.ndarray`
Times
amplitude : float
Amplitude of gaussian (not normalized)
t0 : float
Central time in units of `times`
sigma : float
Gaussian width.
Returns
-------
y : `numpy.ndarray`
Gaussian evaluated at `times`
"""
return amplitude * np.exp(-0.5*(times - t0)**2/sigma**2)
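# Quick sanity check (illustrative, not part of the original module): the
# function peaks at t0 with value `amplitude`,
#   gaussian(np.array([1.0]), 0.01, 1.0, 0.002)  # -> array([0.01])
# and falls to amplitude*exp(-0.5) at one sigma from t0.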
def peak_finder_chi2(theta, x, y, yerr):
"""
Chi^2 model given parameters `theta` and data {`x`, `y`, `yerr`}
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times [JD]
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
Returns
-------
chi2 : float
Chi^2 of the model
"""
model = summed_gaussians(x, theta)
return np.sum((y-model)**2/yerr**2)
def peak_finder(times, residuals, errors, transit_params, n_peaks=4,
plots=False, verbose=False, skip_priors=False):
"""
Find peaks in the residuals from a fit to a transit light curve, which
correspond to starspot occultations.
Parameters
----------
times : `numpy.ndarray`
Times [JD]
residuals : `numpy.ndarray`
Fluxes
errors : `numpy.ndarray`
Uncertainties on residuals
transit_params : `~batman.TransitParams`
Transit light curve parameters
    n_peaks : int (optional)
Number of peaks to search for. If more than `n_peaks` are found, return
only the `n_peaks` largest amplitude peaks.
plots : bool (optional)
Show diagnostic plots
    verbose : bool (optional)
        Warn if no peaks are found
    skip_priors : bool (optional)
        If True, relax the prior check on fitted amplitudes (only require
        them to be non-negative)
Returns
-------
result_in_transit : list or `None`
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
for spots detected.
Notes
-----
Review of minimizers tried for `peak_finder`:
`~scipy.optimize.fmin` gets amplitudes right, but doesn't vary sigmas much.
For this reason, it tends to do a better job of finding nearby, semi-
overlapping spots.
    `~scipy.optimize.fmin_powell` varies amplitudes and sigmas a lot, but
    as a result, sometimes two nearby spots are fit with one wide gaussian.
"""
# http://stackoverflow.com/a/25666951
# Convolve residuals with a gaussian, find relative maxima
n_points_kernel = 100
window = signal.general_gaussian(n_points_kernel+1, p=1, sig=10)
filtered = signal.fftconvolve(window, residuals)
filtered = (np.max(residuals) / np.max(filtered)) * filtered
filtered = np.roll(filtered, int(-n_points_kernel/2))[:len(residuals)]
maxes = signal.argrelmax(filtered)[0]
# Only take maxima, not minima
maxes = maxes[filtered[maxes] > 0]
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
maxes_in_transit = maxes[(times[maxes] < upper_t_bound) &
(times[maxes] > lower_t_bound)]
# Only take the `n_peaks` highest peaks
if len(maxes_in_transit) > n_peaks:
highest_maxes_in_transit = maxes_in_transit[np.argsort(filtered[maxes_in_transit])][-n_peaks:]
else:
highest_maxes_in_transit = maxes_in_transit
# plt.plot(times, filtered)
# plt.plot(times, residuals, '.')
# plt.plot(times[maxes_in_transit], filtered[maxes_in_transit], 'ro')
# [plt.axvline(times[m], color='k') for m in maxes]
# [plt.axvline(times[m], color='m') for m in maxes_in_transit]
# if len(maxes_in_transit) > n_peaks:
# [plt.axvline(times[m], color='b') for m in highest_maxes_in_transit]
# plt.axvline(upper_t_bound, color='r')
# plt.axvline(lower_t_bound, color='r')
# plt.show()
if len(maxes_in_transit) == 0:
if verbose:
print('no maxes found')
return None
peak_times = times[highest_maxes_in_transit]
peak_amplitudes = residuals[highest_maxes_in_transit]
    peak_sigmas = np.zeros(len(peak_times)) + 2./60/24  # 2 min
input_parameters = np.vstack([peak_amplitudes, peak_times,
peak_sigmas]).T.ravel()
result = optimize.fmin_powell(peak_finder_chi2, input_parameters,
disp=False, args=(times, residuals, errors),
xtol=0.00001, ftol=0.00001)
# if np.all(result == input_parameters):
# print('oh no!, fmin didnt produce a fit')
# Only use gaussians that occur in transit (fmin fit is unbounded in time)
# and amplitude is positive:
    split_result = np.split(result, len(input_parameters) // 3)
result_in_transit = []
for amplitude, t0, sigma in split_result:
depth = transit_params.rp**2
trial_params = np.array([amplitude, t0, sigma])
if not np.isinf(lnprior(trial_params, residuals, lower_t_bound,
upper_t_bound, transit_params, skip_priors)):
result_in_transit.extend([amplitude, t0, np.abs(sigma)])
result_in_transit = np.array(result_in_transit)
if len(result_in_transit) == 0:
return None
if plots:
fig, ax = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
ax[0].errorbar(times, residuals, fmt='.', color='k')
[ax[0].axvline(t) for t in result_in_transit[1::3]]
ax[0].plot(times, summed_gaussians(times, input_parameters), 'r')
ax[0].axhline(0, color='k', ls='--')
ax[0].set_ylabel('Transit Residuals')
ax[1].errorbar(times, residuals, fmt='.', color='k')
ax[1].plot(times, summed_gaussians(times, result_in_transit), 'r')
ax[1].axhline(0, color='k', ls='--')
ax[1].set_ylabel('Residuals')
ax[2].errorbar(times,
residuals - summed_gaussians(times, result_in_transit),
fmt='.', color='k')
#ax[1].errorbar(times, gaussian_model, fmt='.', color='r')
ax[2].axhline(0, color='k', ls='--')
        ax[2].set_ylabel('Residuals - model')
for axis in ax:
axis.axvline(upper_t_bound, color='r')
axis.axvline(lower_t_bound, color='r')
fig.tight_layout()
plt.show()
return result_in_transit
def generate_lc(times, transit_params):
"""
Make a transit light curve.
Parameters
----------
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
model_flux : `numpy.ndarray`
Fluxes from model transit light curve
"""
exp_time = 1./60/24 # 1 minute cadence -> [days]
m = batman.TransitModel(transit_params, times, supersample_factor=7,
exp_time=exp_time)
model_flux = m.light_curve(transit_params)
return model_flux
def summed_gaussians(times, spot_parameters):
"""
Take a list of gaussian input parameters (3 parameters per gaussian), make
a model of the sum of all of those gaussians.
Parameters
----------
times : `numpy.ndarray`
Times in JD
spot_parameters : list
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
Returns
-------
model : `numpy.ndarray`
Sum of gaussians
"""
model = np.zeros(len(times), dtype=np.float128)
if spot_parameters is not None and len(spot_parameters) % 3 == 0:
        split_input_parameters = np.split(np.array(spot_parameters),
                                          len(spot_parameters) // 3)
for amplitude, t0, sigma in split_input_parameters:
model += gaussian(times, amplitude, t0, sigma)
return model
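# Worked example (illustrative, not part of the original module): two spots
# in the flat [amp, t0, sig, amp, t0, sig] ordering used throughout:
#   t = np.linspace(0.0, 0.02, 200)
#   model = summed_gaussians(t, [0.002, 0.005, 0.001,   # spot 1
#                                0.001, 0.012, 0.002])  # spot 2
# If spot_parameters is None or its length is not a multiple of 3, an
# all-zero model is returned.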
def get_in_transit_bounds(times, params, duration_fraction=0.9):
"""
    Approximate the boundaries of "in-transit" for transits occurring
    during times `times`.
Parameters
----------
times : `numpy.ndarray`
Times in JD
params : `~batman.TransitParams`
Transit light curve parameters
duration_fraction : float
Fraction of the full transit duration to consider "in-transit"
Returns
-------
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
"""
phased = (times - params.t0) % params.per
near_transit = ((phased < params.duration*(0.5*duration_fraction)) |
(phased > params.per -
params.duration*(0.5*duration_fraction)))
if np.count_nonzero(near_transit) == 0:
near_transit = 0
return times[near_transit].min(), times[near_transit].max()
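# Worked example (illustrative, not part of the original module): with
# transit_params.t0 = 0, per = 10 and duration = 0.2 (`duration` is an
# attribute friedrich attaches to the batman.TransitParams object), the
# default duration_fraction = 0.9 flags a time t as in-transit when
# (t % 10) < 0.09 or (t % 10) > 9.91, and the min/max of those times
# are returned.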
def lnprior(theta, y, lower_t_bound, upper_t_bound, transit_params,
skip_priors):
"""
Log prior for `emcee` runs.
Parameters
----------
theta : list
Fitting parameters
y : `numpy.ndarray`
Fluxes
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
    transit_params : `~batman.TransitParams`
        Transit light curve parameters
    skip_priors : bool
        Should the priors be skipped?
Returns
-------
lnpr : float
Log-prior for trial parameters `theta`
"""
spot_params = theta
amplitudes = spot_params[::3]
t0s = spot_params[1::3]
sigmas = spot_params[2::3]
depth = transit_params.rp**2
min_sigma = 1.5/60/24
max_sigma = transit_params.duration # 6.0e-3 # upper_t_bound - lower_t_bound
t0_ok = ((lower_t_bound < t0s) & (t0s < upper_t_bound)).all()
sigma_ok = ((min_sigma < sigmas) & (sigmas < max_sigma)).all()
if not skip_priors:
amplitude_ok = ((0 <= amplitudes) & (amplitudes < depth)).all()
else:
amplitude_ok = (amplitudes >= 0).all()
if amplitude_ok and t0_ok and sigma_ok:
return 0.0
return -np.inf
def lnlike(theta, x, y, yerr, transit_params, skip_priors=False):
"""
Log-likelihood of data given model.
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times in JD
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
lnp : float
Log-likelihood of data given model, i.e. ln( P(x | theta) )
"""
model = spotted_transit_model(theta, x, transit_params, skip_priors)
return -0.5*np.sum((y-model)**2/yerr**2)
def lnprob(theta, x, y, yerr, lower_t_bound, upper_t_bound, transit_params,
skip_priors):
"""
Log probability.
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times in JD
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
transit_params : `~batman.TransitParams`
Transit light curve parameters
    Returns
    -------
    lnp : float
        Log-posterior of the trial parameters `theta` (lnprior + lnlike)
    """
lp = lnprior(theta, y, lower_t_bound, upper_t_bound, transit_params,
skip_priors)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr, transit_params, skip_priors)
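# Hedged wiring sketch (illustrative, not part of the original module):
# lnprob is the callable handed to emcee, as run_emcee_seeded does below
# with an MPIPool; a minimal serial version would look like
#   sampler = emcee.EnsembleSampler(n_walkers, ndim, lnprob,
#                                   args=(x, y, yerr, lower_t_bound,
#                                         upper_t_bound, transit_params,
#                                         False))
#   sampler.run_mcmc(initial_positions, n_steps)
# where `initial_positions` is a hypothetical (n_walkers, ndim) array.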
def spotted_transit_model(theta, times, transit_params, skip_priors=False):
"""
Compute sum of spot model and transit model
Parameters
----------
theta : list
Trial parameters
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
f : `numpy.ndarray`
Model fluxes
"""
spot_params = theta
# Set depth according to input parameters, compute transit model
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params,
duration_fraction=1.0)
transit_model = generate_lc(times, transit_params)
spot_model = summed_gaussians(times, spot_params)
# Sum the models only where planet is in transit
transit_plus_spot_model = transit_model
in_transit_times = (times < upper_t_bound) & (times > lower_t_bound)
transit_plus_spot_model[in_transit_times] += spot_model[in_transit_times]
if not skip_priors:
# Force all model fluxes <=1
transit_plus_spot_model[transit_plus_spot_model > 1] = 1.0
return transit_plus_spot_model
def spotted_transit_model_individuals(theta, times, transit_params):
"""
Compute sum of each spot model and the transit model individually,
return a list of each.
Parameters
----------
theta : list
Trial parameters
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
f_list : list
List of model fluxes
"""
spot_params = theta
    split_spot_params = np.split(spot_params, len(spot_params) // 3)
return [spotted_transit_model(spot_params, times, transit_params)
for spot_params in split_spot_params]
def run_emcee_seeded(light_curve, transit_params, spot_parameters, n_steps,
n_walkers, output_path, burnin=0.7,
n_extra_spots=1, skip_priors=False):
"""
Fit for transit depth and spot parameters given initial guess informed by
results from `peak_finder`
Parameters
----------
light_curve : `friedrich.lightcurve.TransitLightCurve`
Light curve to fit
transit_params : `~batman.TransitParams`
Transit light curve parameters
spot_parameters : list
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
n_steps : int
Number of MCMC steps to take
n_walkers : int
Number of MCMC walkers to initialize (must be even, more than twice the
number of free params in fit)
output_path : str
Path to HDF5 archive output for storing results
burnin : float
Fraction of total number of steps to save to output (will truncate
the first `burnin` of the light curve)
n_extra_spots : int
Add `n_extra_spots` extra spots to the fit to soak up spots not
predicted by `peak_finder`
skip_priors : bool
Should a prior be applied to the depth parameter?
Returns
-------
sampler : `emcee.EnsembleSampler`
Sampler object returned by `emcee`
"""
times = light_curve.times.jd
fluxes = light_curve.fluxes
errors = light_curve.errors
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
amps = spot_parameters[::3]
init_depth = transit_params.rp**2
extra_spot_params = [0.1*np.min(amps), np.mean(times),
0.05*(upper_t_bound-lower_t_bound)]
fit_params = np.concatenate([spot_parameters,
n_extra_spots*extra_spot_params])
ndim, nwalkers = len(fit_params), n_walkers
pos = []
while len(pos) < nwalkers:
realization = fit_params + 1e-5*np.random.randn(ndim)
if not np.isinf(lnprior(realization, fluxes, lower_t_bound,
upper_t_bound, transit_params, skip_priors)):
pos.append(realization)
print('Begin MCMC...')
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(times, fluxes, errors, lower_t_bound,
upper_t_bound, transit_params,
skip_priors),
pool=pool)
sampler.run_mcmc(pos, n_steps)
print('Finished MCMC...')
pool.close()
burnin_len = int(burnin*n_steps)
from .storage import create_results_archive
create_results_archive(output_path, light_curve, sampler, burnin_len, ndim)
return sampler
|
bmorris3/friedrich
|
friedrich/fitting.py
|
Python
|
mit
| 16,547
|
[
"Gaussian"
] |
8ea8482fc88f30002118c582815ef2391aa91d6d0bc3f205c5c617c73d34c1fc
|
""" Test class for SiteDirector
"""
# pylint: disable=protected-access
# imports
import datetime
import pytest
from mock import MagicMock
from DIRAC import gLogger
# sut
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import SiteDirector
mockAM = MagicMock()
mockGCReply = MagicMock()
mockGCReply.return_value = "TestSetup"
mockOPSObject = MagicMock()
mockOPSObject.getValue.return_value = "123"
mockOPSReply = MagicMock()
mockOPSReply.return_value = "123"
mockOPS = MagicMock()
mockOPS.return_value = mockOPSObject
# mockOPS.Operations = mockOPSObject
mockPM = MagicMock()
mockPM.requestToken.return_value = {"OK": True, "Value": ("token", 1)}
mockPMReply = MagicMock()
mockPMReply.return_value = {"OK": True, "Value": ("token", 1)}
mockCSGlobalReply = MagicMock()
mockCSGlobalReply.return_value = "TestSetup"
mockResourcesReply = MagicMock()
mockResourcesReply.return_value = {"OK": True, "Value": ["x86_64-slc6", "x86_64-slc5"]}
mockPilotAgentsDB = MagicMock()
mockPilotAgentsDB.setPilotStatus.return_value = {"OK": True}
gLogger.setLevel("DEBUG")
@pytest.fixture
def sd(mocker):
"""mocker for SiteDirector"""
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule.__init__")
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gConfig.getValue", side_effect=mockGCReply)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.Operations", side_effect=mockOPS)
mocker.patch(
"DIRAC.WorkloadManagementSystem.Agent.SiteDirector.gProxyManager.requestToken", side_effect=mockPMReply
)
mocker.patch("DIRAC.WorkloadManagementSystem.Agent.SiteDirector.AgentModule", side_effect=mockAM)
sd = SiteDirector()
sd.log = gLogger
sd.am_getOption = mockAM
sd.log.setLevel("DEBUG")
sd.rpcMatcher = MagicMock()
sd.rssClient = MagicMock()
sd.workingDirectory = ""
sd.queueDict = {
"aQueue": {
"Site": "LCG.CERN.cern",
"CEName": "aCE",
"CEType": "SSH",
"QueueName": "aQueue",
"ParametersDict": {
"CPUTime": 12345,
"Community": "lhcb",
"OwnerGroup": ["lhcb_user"],
"Setup": "LHCb-Production",
"Site": "LCG.CERN.cern",
},
}
}
return sd
def test__getPilotOptions(sd):
"""Testing SiteDirector()._getPilotOptions()"""
res = sd._getPilotOptions("aQueue")
assert set(["-S TestSetup", "-V 123", "-l 123", "-n LCG.CERN.cern"]) <= set(res)
@pytest.mark.parametrize(
"mockMatcherReturnValue, expected, anyExpected, sitesExpected",
[
({"OK": False, "Message": "boh"}, False, True, set()),
({"OK": True, "Value": None}, False, True, set()),
({"OK": True, "Value": {"1": {"Jobs": 10}, "2": {"Jobs": 20}}}, True, True, set()),
({"OK": True, "Value": {"1": {"Jobs": 10, "Sites": ["Site1"]}, "2": {"Jobs": 20}}}, True, True, set(["Site1"])),
(
{"OK": True, "Value": {"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]}, "2": {"Jobs": 20}}},
True,
True,
set(["Site1", "Site2"]),
),
(
{
"OK": True,
"Value": {"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]}, "2": {"Jobs": 20, "Sites": ["Site1"]}},
},
True,
False,
set(["Site1", "Site2"]),
),
(
{
"OK": True,
"Value": {"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]}, "2": {"Jobs": 20, "Sites": ["ANY"]}},
},
True,
False,
{"Site1", "Site2", "ANY"},
),
(
{
"OK": True,
"Value": {"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]}, "2": {"Jobs": 20, "Sites": ["ANY", "Site3"]}},
},
True,
False,
{"Site1", "Site2", "Site3", "ANY"},
),
(
{
"OK": True,
"Value": {"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]}, "2": {"Jobs": 20, "Sites": ["Any", "Site3"]}},
},
True,
False,
{"Site1", "Site2", "Site3", "Any"},
),
(
{
"OK": True,
"Value": {
"1": {"Jobs": 10, "Sites": ["Site1", "Site2"]},
"2": {"Jobs": 20, "Sites": ["NotAny", "Site2"]},
},
},
True,
False,
{"Site1", "Site2", "NotAny"},
),
],
)
def test__ifAndWhereToSubmit(sd, mockMatcherReturnValue, expected, anyExpected, sitesExpected):
"""Testing SiteDirector()._ifAndWhereToSubmit()"""
sd.matcherClient = MagicMock()
sd.matcherClient.getMatchingTaskQueues.return_value = mockMatcherReturnValue
res = sd._ifAndWhereToSubmit()
assert res[0] == expected
if res[0]:
assert res == (expected, anyExpected, sitesExpected, set())
def test__allowedToSubmit(sd):
"""Testing SiteDirector()._allowedToSubmit()"""
submit = sd._allowedToSubmit("aQueue", True, set(["LCG.CERN.cern"]), set())
assert submit is False
sd.siteMaskList = ["LCG.CERN.cern", "DIRAC.CNAF.it"]
submit = sd._allowedToSubmit("aQueue", True, set(["LCG.CERN.cern"]), set())
assert submit is True
sd.rssFlag = True
submit = sd._allowedToSubmit("aQueue", True, set(["LCG.CERN.cern"]), set())
assert submit is False
sd.ceMaskList = ["aCE", "anotherCE"]
submit = sd._allowedToSubmit("aQueue", True, set(["LCG.CERN.cern"]), set())
assert submit is True
def test__submitPilotsToQueue(sd):
"""Testing SiteDirector()._submitPilotsToQueue()"""
# Create a MagicMock that does not have the workingDirectory
# attribute (https://cpython-test-docs.readthedocs.io/en/latest/library/unittest.mock.html#deleting-attributes)
# This is to use the SiteDirector's working directory, not the CE one
ceMock = MagicMock()
del ceMock.workingDirectory
sd.queueCECache = {"aQueue": {"CE": ceMock}}
sd.queueSlots = {"aQueue": {"AvailableSlots": 10}}
assert sd._submitPilotsToQueue(1, MagicMock(), "aQueue")["OK"]
@pytest.mark.parametrize(
"pilotRefs, pilotDict, pilotCEDict, expected",
[
([], {}, {}, (0, [])),
(
["aPilotRef"],
{"aPilotRef": {"Status": "Running", "LastUpdateTime": datetime.datetime(2000, 1, 1).utcnow()}},
{},
(0, []),
),
(
["aPilotRef"],
{"aPilotRef": {"Status": "Running", "LastUpdateTime": datetime.datetime(2000, 1, 1).utcnow()}},
{"aPilotRef": "Running"},
(0, []),
),
(
["aPilotRef"],
{"aPilotRef": {"Status": "Running", "LastUpdateTime": datetime.datetime(2000, 1, 1).utcnow()}},
{"aPilotRef": "Unknown"},
(0, []),
),
],
)
def test__updatePilotStatus(sd, pilotRefs, pilotDict, pilotCEDict, expected):
"""Testing SiteDirector()._updatePilotStatus()"""
res = sd._updatePilotStatus(pilotRefs, pilotDict, pilotCEDict)
assert res == expected
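# Hedged usage note (not part of the original tests): the `mocker` fixture
# used above comes from the pytest-mock plugin, so these tests would
# typically be run with pytest, e.g.
#   pytest src/DIRAC/WorkloadManagementSystem/Agent/test/Test_Agent_SiteDirector.py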
|
DIRACGrid/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Agent/test/Test_Agent_SiteDirector.py
|
Python
|
gpl-3.0
| 7,215
|
[
"DIRAC"
] |
8f74571b0ac8f863e2c3934a521cf2c5cb285e1ba8c41b01470488d93167f9ce
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
"""
This module provides a base class, SQTensor, and associated methods for
creating and manipulating square rank 2 tensors
"""
__author__ = "Maarten de Jong, Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Wei Chen, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Maarten de Jong"
__email__ = "maartendft@gmail.com"
__status__ = "Development"
__date__ = "March 22, 2012"
from scipy.linalg import polar
import numpy as np
class SQTensor(np.ndarray):
"""
Base class for doing useful general operations on *square* second order
tensors, without restrictions on what type (stress, elastic, strain etc.).
"""
def __new__(cls, input_array):
"""
Create a SQTensor object. Note that the constructor uses __new__
rather than __init__ according to the standard method of
        subclassing numpy ndarrays. An error is raised when the class is
        initialized with a non-square matrix.
        Args:
            input_array (NxN array-like): square array-like representing
                the tensor
"""
obj = np.asarray(input_array).view(cls)
if not (len(obj.shape) == 2 and obj.shape[0] == obj.shape[1]):
raise ValueError("SQTensor only takes 2-D "
"square array-likes as input")
return obj
def __array_finalize__(self, obj):
if obj is None:
return
def __array_wrap__(self, obj):
"""
Overrides __array_wrap__ methods in ndarray superclass to avoid errors
associated with functions that return scalar values
"""
if len(obj.shape) == 0:
return obj[()]
else:
return np.ndarray.__array_wrap__(self, obj)
def __hash__(self):
"""
define a hash function, since numpy arrays
have their own __eq__ method
"""
return hash(self.tostring())
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               self.__str__())
@property
def trans(self):
"""
shorthand for transpose on SQTensor
"""
return SQTensor(np.transpose(self))
@property
def inv(self):
"""
shorthand for matrix inverse on SQTensor
"""
if self.det == 0:
raise ValueError("SQTensor is non-invertible")
return SQTensor(np.linalg.inv(self))
@property
def det(self):
"""
shorthand for the determinant of the SQTensor
"""
return np.linalg.det(self)
def is_symmetric(self, tol=1e-5):
"""
Test to see if tensor is symmetric to a user-defined tolerance.
This is determined by subtracting the transpose; if any of the
resultant elements are above the specified tolerance, returns
        False. Otherwise returns True.
Args:
tol (float): tolerance to symmetry test
"""
return (np.abs(self - self.trans) < tol).all()
def is_rotation(self, tol=1e-5):
"""
Test to see if tensor is a valid rotation matrix, performs a
test to check whether the inverse is equal to the transpose
and if the determinant is equal to one within the specified
tolerance
Args:
            tol (float): tolerance for both tests: whether the
                determinant is one and whether the inverse is equal
                to the transpose
        """
        return (np.abs(self.inv - self.trans) < tol).all() \
            and (np.abs(np.linalg.det(self) - 1.) < tol)
@property
def symmetrized(self):
"""
Returns a symmetrized matrix from the input matrix,
calculated by taking the sum of the matrix and its
transpose
"""
return 0.5 * (self + self.trans)
def rotate(self, rotation):
"""
        Returns a rotated tensor based on input of another rotation
        tensor.
Args:
rotation (3x3 array-like): rotation tensor, is tested
for rotation properties and then operates on self
"""
if self.shape != (3, 3):
raise NotImplementedError("Rotations are only implemented for "
"3x3 tensors.")
rotation = SQTensor(rotation)
if not rotation.is_rotation():
raise ValueError("Specified rotation matrix is invalid")
return np.dot(rotation, np.dot(self, rotation.trans))
def get_scaled(self, scale_factor):
"""
Scales the tensor by a certain multiplicative scale factor
Args:
scale_factor (float): scalar multiplier to be applied to the
SQTensor object
"""
return SQTensor(self * scale_factor)
@property
def principal_invariants(self):
"""
Returns a list of principal invariants for the tensor,
which are the values of the coefficients of the characteristic
polynomial for the matrix
"""
if self.shape == (3, 3):
return np.poly(self)[1:]*np.array([-1, 1, -1])
else:
raise ValueError("Principal invariants is only intended for use "
"with 3x3 SQTensors")
def polar_decomposition(self, side='right'):
"""
calculates matrices for polar decomposition
"""
return polar(self, side=side)
def zeroed(self, tol=1e-5):
"""
        returns the matrix with all entries whose magnitude is below a
        certain threshold (i.e. tol) set to zero
        """
        new_tensor = self.copy()
        new_tensor[abs(new_tensor) < tol] = 0
return new_tensor
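# Minimal usage sketch (illustrative, not part of the original module):
#   t = SQTensor(np.eye(3))
#   t.is_symmetric()          # True
#   t.is_rotation()           # True: inverse equals transpose, det is 1
#   t.principal_invariants    # array([3., 3., 1.]) for the identity
#   SQTensor([[1, 2, 3]])     # raises ValueError (input is not square)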
|
migueldiascosta/pymatgen
|
pymatgen/analysis/elasticity/tensors.py
|
Python
|
mit
| 5,963
|
[
"pymatgen"
] |
4c636bf608b5e304f4d608355566b2e150a5246eb05784df21c973d9807d71d1
|
"""
test_gaussian.py: Test suite for the Gaussian estimator class :class:`GaussEst`
"""
from __future__ import print_function
import unittest
import numpy as np
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
def lin_test(zshape=(500,10),Ashape=(1000,500),verbose=False,tol=0.1):
"""
Unit test for the linear estimator class
The test is performed by generating random data
:math:`y=Az+w, z \\sim {\\mathcal N}(r, \\tau_r I),
w \\sim {\\mathcal N}(0, \\tau_w I)`
Then the method estimates :math:`z` from :math:`y`
and compares the expected and measured errors.
:param zshape: shape of :math:`z`
    :param Ashape: shape of the matrix :math:`A`.  This must be consistent
        with :code:`zshape`.
    :param Boolean verbose: print results
    :param tol: error tolerance above which the test is considered
        to fail.
"""
# Generate random parameters
rvar = 10**(np.random.uniform(-1,1,1))[0]
wvar = 10**(np.random.uniform(-1,1,1))[0]
# Generate random matrix
A = np.random.normal(0,1,Ashape)/np.sqrt(Ashape[1])
Aop = vp.trans.MatrixLT(A, zshape)
yshape = Aop.shape1
# Add noise on input and output
r = np.random.normal(0,1,zshape)
z = r + np.random.normal(0,np.sqrt(rvar),zshape)
y = A.dot(z) + np.random.normal(0,np.sqrt(wvar),yshape)
# Construct the linear estimator
est = vp.estim.LinEst(Aop,y,wvar,var_axes='all')
# Perform the initial estimate. This is just run to make sure it
# doesn't crash
zhat, zhatvar, cost = est.est_init(return_cost=True)
if (zhat.shape != r.shape):
raise vp.common.TestException(\
"est_init does not produce the correct shape")
# Posterior estimate
zhat, zhatvar, cost = est.est(r,rvar,return_cost=True)
zerr = np.mean(np.abs(z-zhat)**2)
fail = (np.abs(zerr-zhatvar) > tol*np.abs(zhatvar))
if verbose or fail:
print("\nPosterior: True: {0:f} Est:{1:f}".format(zerr,zhatvar))
if fail:
raise vp.common.TestException("Posterior estimate Gaussian error "+
" does not match predicted value")
class TestCases(unittest.TestCase):
def test_linear(self):
lin_test(zshape=(500,10),Ashape=(1000,500))
lin_test(zshape=(500,),Ashape=(1000,500),tol=0.5)
lin_test(zshape=(500,10),Ashape=(250,500))
if __name__ == '__main__':
unittest.main()
|
GAMPTeam/vampyre
|
test/test_estim/test_linear.py
|
Python
|
mit
| 2,552
|
[
"Gaussian"
] |
586aa8b3c7624114a45df66469cb2eacfb4d8c95d3de6174148003060e8bedf1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Dave Kasberg (@dkasberg)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup configuration.
The module is usually invoked after the running configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
    For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_reload.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Device is Reloading. Please wait..."
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "reload \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "(y/n):", 2, remote_conn)
# Send the Confirmation y
output = output + cnos.waitForDeviceResponse("y\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
    if errorMsg in "Device Response Timed out":
module.exit_json(changed=True, msg="Device is Reloading. Please wait...")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/lenovo/cnos_reload.py
|
Python
|
bsd-3-clause
| 4,936
|
[
"VisIt"
] |
ba139d4eadeeb13bd200cf8af31034e3cd49bbf7fbe53ba36898a0ffe4205114
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
import random
def drawVertex(myscreen, p, vertexColor, rad=1):
myscreen.addActor( camvtk.Sphere( center=(p.x,p.y,p.z), radius=rad, color=vertexColor ) )
def drawEdge(myscreen, e, edgeColor=camvtk.yellow):
p1 = e[0]
p2 = e[1]
myscreen.addActor( camvtk.Line( p1=( p1.x,p1.y,p1.z), p2=(p2.x,p2.y,p2.z), color=edgeColor ) )
def drawFarCircle(myscreen, r, circleColor):
myscreen.addActor( camvtk.Circle( center=(0,0,0), radius=r, color=circleColor ) )
def drawDiagram( myscreen, vd ):
drawFarCircle(myscreen, vd.getFarRadius(), camvtk.pink)
for v in vd.getGenerators():
drawVertex(myscreen, v, camvtk.green, 2)
for v in vd.getVoronoiVertices():
drawVertex(myscreen, v, camvtk.red, 1)
for v in vd.getFarVoronoiVertices():
drawVertex(myscreen, v, camvtk.pink, 10)
vde = vd.getVoronoiEdges()
print " got ",len(vde)," Voronoi edges"
for e in vde:
drawEdge(myscreen,e, camvtk.cyan)
class VD:
def __init__(self, myscreen, vd):
self.myscreen = myscreen
self.gen_pts=[ocl.Point(0,0,0)]
self.generators = camvtk.PointCloud(pointlist=self.gen_pts)
self.verts=[]
self.far=[]
self.edges =[]
self.generatorColor = camvtk.green
self.vertexColor = camvtk.red
self.edgeColor = camvtk.cyan
self.vdtext = camvtk.Text()
self.vdtext.SetPos( (50, myscreen.height-50) )
self.Ngen = 0
self.vdtext_text = ""
self.setVDText(vd)
myscreen.addActor(self.vdtext)
def setVDText(self, vd):
self.Ngen = len( vd.getGenerators() )-3
self.vdtext_text = "VD with " + str(self.Ngen) + " generators."
self.vdtext.SetText( self.vdtext_text )
def setGenerators(self, vd):
if len(self.gen_pts)>0:
myscreen.removeActor( self.generators )
#self.generators=[]
self.gen_pts = []
for p in vd.getGenerators():
self.gen_pts.append(p)
self.generators= camvtk.PointCloud(pointlist=self.gen_pts)
self.generators.SetPoints()
myscreen.addActor(self.generators)
#self.generators = []
#for p in vd.getGenerators():
# gactor = camvtk.Sphere( center=(p.x,p.y,p.z), radius=0.05, color=self.generatorColor )
# self.generators.append(gactor)
# myscreen.addActor( gactor )
self.setVDText(vd)
myscreen.render()
def setFar(self, vd):
for p in vd.getFarVoronoiVertices():
myscreen.addActor( camvtk.Sphere( center=(p.x,p.y,p.z), radius=4, color=camvtk.pink ) )
myscreen.render()
def setVertices(self, vd):
for p in self.verts:
myscreen.removeActor(p)
#p.Delete()
self.verts = []
for p in vd.getVoronoiVertices():
actor = camvtk.Sphere( center=(p.x,p.y,p.z), radius=0.2, color=self.vertexColor )
            self.verts.append(actor)
myscreen.addActor( actor )
myscreen.render()
def setEdges(self, vd):
self.edges = []
self.edges = vd.getEdgesGenerators()
self.epts = vtk.vtkPoints()
nid = 0
lines=vtk.vtkCellArray()
for e in self.edges:
p1 = e[0]
p2 = e[1]
self.epts.InsertNextPoint( p1.x, p1.y, p1.z)
self.epts.InsertNextPoint( p2.x, p2.y, p2.z)
line = vtk.vtkLine()
line.GetPointIds().SetId(0,nid)
line.GetPointIds().SetId(1,nid+1)
nid = nid+2
lines.InsertNextCell(line)
linePolyData = vtk.vtkPolyData()
linePolyData.SetPoints(self.epts)
linePolyData.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(linePolyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor( camvtk.cyan )
myscreen.addActor( actor )
#myscreen.removeActor(e)
#e.Delete()
#self.edges = []
#for e in vd.getEdgesGenerators():
# ofset = 0
# p1 = e[0] + ofset*e[2]
# p2 = e[1] + ofset*e[2]
# actor = camvtk.Line( p1=( p1.x,p1.y,p1.z), p2=(p2.x,p2.y,p2.z), color=self.edgeColor )
# myscreen.addActor(actor)
# self.edges.append(actor)
#actor1 = camvtk.Sphere( center=(p1.x,p1.y,p1.z), radius=2, color=camvtk.pink )
#actor2 = camvtk.Sphere( center=(p2.x,p2.y,p2.z), radius=2, color=camvtk.lgreen )
#myscreen.addActor(actor1)
#self.edges.append(actor1)
#myscreen.addActor(actor2)
#self.edges.append(actor2)
myscreen.render()
def setAll(self, vd):
self.setGenerators(vd)
#self.setFar(vd)
#self.setVertices(vd)
self.setEdges(vd)
def addVertexSlow(myscreen, vd, vod, p):
pass
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(0.0000001, 0, 0.0001) # 1200 for far view, 300 for circle view
myscreen.camera.SetFocalPoint(0, 0, 0)
myscreen.camera.SetClippingRange(-1,1)
camvtk.drawOCLtext(myscreen)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
#w2if.Modified()
#lwr.SetFileName("tux1.png")
myscreen.render()
random.seed(42)
far = 0.000010
vd = ocl.VoronoiDiagram(far,1200)
vod = VD(myscreen,vd)
#vod.setAll(vd)
drawFarCircle(myscreen, vd.getFarRadius(), camvtk.orange)
#plist=[ocl.Point(61,61) ]
#plist.append(ocl.Point(-20,-20))
#plist.append(ocl.Point(0,0))
Nmax = 191
# far = 0.000010 crashes at n=192
plist=[]
for n in range(Nmax):
x=-far/2+far*random.random()
y=-far/2+far*random.random()
plist.append( ocl.Point(x,y) )
n=1
t_before = time.time()
for p in plist:
#vod.setAll(vd)
#time.sleep(0.033)
print "PYTHON: adding generator: ",n," at ",p
vd.addVertexSite( p )
#vod.setAll(vd)
#w2if.Modified()
#lwr.SetFileName("frames/vd500_"+ ('%05d' % n)+".png")
#lwr.Write()
n=n+1
t_after = time.time()
calctime = t_after-t_before
print " VD done in ", calctime," s, ", calctime/Nmax," s per generator"
vod.setAll(vd)
#vod.setGenerators(vd)
#time.sleep(1)
#vod.setVertices(vd)
#vod.setEdges(vd)
#vd.addVertexSite( ocl.Point(0,-20) )
#vod.setGenerators(vd)
#time.sleep(1)
#vod.setVertices(vd)
#vod.setEdges(vd)
#vd.addVertexSite( ocl.Point(20,20) )
#drawDiagram( myscreen, vd )
#dle = vd.getDelaunayEdges()
#print " got ",len(dle)," Delaunay edges"
#for e in dle:
# drawEdge(myscreen,e, camvtk.red)
print "PYTHON All DONE."
#camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
myscreen.render()
myscreen.iren.Start()
|
AlanZatarain/opencamlib
|
scripts/voronoi/voronoi_4.py
|
Python
|
gpl-3.0
| 7,252
|
[
"VTK"
] |
b0cf7c1c7dab5dbebb9060fff978cf01529c20f3ecfe5c96f4a1e4bea8bb18bb
|
import copy
import pymc3 as pm
import numpy as np
import seaborn as sns
import theano
import theano.tensor
import matplotlib.pyplot as plt
from Unfolder.Histogram import H1D, H2D, plotH1D, plotH2D, plotH1DWithText, plotH2DWithText, plotH1DLines
from Unfolder.ComparisonHelpers import getDataFromModel
from scipy import stats
from scipy import optimize
# FIXME
# Change this to estimate the mode using Sympy
# Useful as a cross check
doSymbolicMode = False
try:
import sympy
except:
doSymbolicMode = False
theano.config.compute_test_value = 'warn'
'''
Shows the reference for Fully Bayesian Unfolding, to encourage people to give
credit for the original work (which is not mine!).
'''
def printReferences():
print("This software has been produced using the ideas put forth in:")
print("Choudalakis, G., ``Fully Bayesian Unfolding'', physics.data-an:1201.4612, https://arxiv.org/abs/1201.4612")
print("Please cite it if you plan to publish this.")
print("More information on the software itself can be found in https://github.com/daniloefl/Unfolder")
print("")
printReferences()
'''
Class that uses Fully Bayesian Unfolding to
unfold a distribution using NUTS to sample the prior distribution.
'''
class Unfolder:
'''
Constructor of unfolding class
bkg is a list or array with the contents of the background histogram
mig is a 2D array such that mig[i, j] contains the number of events in bin i at truth level
that are reconstructed in bin j at reco level
eff is a list or array with the contents of the efficiency histogram, defined as:
eff[i] = (1 - (# events in truth bin i that fail reco)/(# events in truth bin i))
truth is the particle level histogram. If it is None, it is calculated from the migration
matrix mig and the efficiency, but if the efficiency has been defined as 1 - (not reco & truth)/truth
instead of (reco&truth)/truth, the error propagation will be incorrect.
'''
def __init__(self, bkg, mig, eff, truth = None):
self.nll = 0
self.bkg = H1D(bkg) # background
self.Nr = mig.shape[1] # number of reco bins
self.Nt = mig.shape[0] # number of truth bins
self.mig = H2D(mig) # joint probability
self.eff = H1D(eff) # efficiency
# calculate response matrix, defined as response[i, j] = P(r = j|t = i) = P(t = i, r = j)/P(t = i)
self.response = H2D(mig)
self.response_noeff = H2D(mig) # response matrix if one assumes efficiency = 1
for i in range(0, self.Nt): # for each truth bin
rsum = 0.0
esum = 0.0
for j in range(0, self.Nr): # for each reco bin
rsum += self.mig.val[i, j] # calculate the sum of all reco bins in the same truth bin
esum += self.mig.err[i, j]
# rsum is now the total sum of events that has that particular truth bin
# now, for each reco bin in truth bin i, divide that row by the total number of events in it
# and multiply the response matrix by the efficiency
for j in range(0, self.Nr):
self.response.val[i, j] = self.mig.val[i, j]/rsum*self.eff.val[i] # P(r|t) = P(t, r)/P(t) = Mtr*eff(t)/sum_k=1^Nr Mtk
self.response.err[i, j] = 0 # FIXME
self.response_noeff.val[i, j] = self.mig.val[i, j]/rsum
self.response_noeff.err[i, j] = 0 # FIXME
# if the prior is later chosen to be non-uniform, also calculate the truth distribution
# so that the prior can be set according to it, if necessary
# also useful to make plots
# truth dist(i)*eff(i) = P(t=i and it is reconstructed)
        if truth is None:
self.truth = self.mig.project('x').divideWithoutErrors(self.eff)
else:
self.truth = H1D(truth)
self.recoWithoutFakes = self.mig.project('y') # reco dist(j) = P(r=j and it is in truth)
self.prior = "uniform"
self.priorAttributes = {}
# for systematic uncertainties
self.bkg_syst = {}
self.reco_syst = {}
self.systematics = []
self.response_unfsyst = {}
self.bkg_unfsyst = {}
self.unf_systematics = []
self.prior_unfsyst = {}
self.fb = 0
self.constrainArea = False
self.tot_bkg = self.bkg.integral()[0]
self.ave_eff = self.recoWithoutFakes.integral()[0]/self.truth.integral()[0]
'''
Use a Gaussian prior with bin-by-bin width given by widths and mean given by means.
If either widths or means is set to None, the truth distribution is used with
widths given by the square root of the bin contents.
'''
def setGaussianPrior(self, widths = None, means = None):
        if means is None:
self.priorAttributes['bias'] = copy.deepcopy(self.truth.val)
else:
self.priorAttributes['bias'] = copy.deepcopy(means)
        if widths is None:
self.priorAttributes['sd'] = copy.deepcopy(self.truth.err)**0.5
else:
self.priorAttributes['sd'] = copy.deepcopy(widths)
self.prior = "gaussian"
self.fb = 0
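    # Hedged usage sketch (illustrative, not part of the original class):
    #   u = Unfolder(bkg, mig, eff)
    #   u.setGaussianPrior()                  # defaults from the truth MC
    #   u.setGaussianPrior(widths=my_widths,  # or explicit arrays; the
    #                      means=my_means)    # names here are hypothetical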
'''
Use an entropy-based prior.
'''
def setEntropyPrior(self):
self.prior = "entropy"
'''
Use a curvature-based prior.
'''
def setCurvaturePrior(self, fb = 1, means = None):
self.prior = "curvature"
        if means is None:
self.priorAttributes['bias'] = copy.deepcopy(self.truth.val)
else:
self.priorAttributes['bias'] = copy.deepcopy(means)
self.fb = fb
'''
Use a first derivative-based prior.
'''
def setFirstDerivativePrior(self, fb = 1, means = None):
self.prior = "first derivative"
        if means is None:
self.priorAttributes['bias'] = copy.deepcopy(self.truth.val)
else:
self.priorAttributes['bias'] = copy.deepcopy(means)
self.fb = fb
'''
Whether to constrain normalisation.
'''
def setConstrainArea(self, value = True):
self.constrainArea = value
'''
Add systematic uncertainty at reconstruction level.
To add uncertainty due to the unfolding factors, one can proceed as follows, taking the difference between
the reconstructed-level distributions using the alternative migration matrices or the nominal.
from Unfolder.Histogram import getNormResponse
addUncertainty(name, nominalBackground, np.dot(nominalTruth.val, getNormResponse(alternativeMigration, alternativeEfficiency).val))
Where Nr is the number of reconstruction-level bins. The first term keeps the effect in the background at zero so that the background
is not correlated with this uncertainty.
The second term is the reconstruction-level histogram one would get if the nominal truth is folded with an alternative unfolding factor.
'''
def addUncertainty(self, name, bkg, reco):
self.bkg_syst[name] = H1D(bkg) - self.bkg
self.reco_syst[name] = H1D(reco) - self.recoWithoutFakes
self.systematics.append(name)
'''
Add uncertainty in the core of the unfolding factors.
Note that this creates a non-linearity in the model.
Analytical calculations show that if one has a difference
in efficiencies between the nominal and other models, this creates
an extra inflection point in the likelihood at nuisance parameter = - (efficiency difference)/(nominal efficiency).
At this point, the likelihood is exactly zero for a one bin analytical calculation.
If this point is in the range sampled for the nuisance parameter, it will cause an asymmetry in the
posterior of the nuisance parameter, which will shift the mean of the posterior of the
unfolded distributions.
It is recommended to add a linear uncertainty by calling:
from Unfolder.Histogram import getNormResponse
addUncertainty(name, nominalBackground, np.dot(nominalTruth.val, getNormResponse(alternativeMigration, alternativeEfficiency).val))
Where Nr is the number of reconstruction-level bins. The first term keeps the effect in the background at zero so that the background
is not correlated with this uncertainty.
The second term is the reconstruction-level histogram one would get if the nominal truth is folded with an alternative unfolding factor.
The prior can be "lognormal" or "normal" to choose what the prior on the nuisance parameter would be.
A lognormal prior generates a posterior with a similar mean and mode, leading to a more natural interpretation of the marginal mean.
'''
def addUnfoldingUncertainty(self, name, bkg, mig, eff, prior = "gaussian"):
# calculate response matrix, defined as response[i, j] = P(r = j|t = i) = P(t = i, r = j)/P(t = i)
self.response_unfsyst[name] = H2D(mig)
self.bkg_unfsyst[name] = H1D(bkg)
for i in range(0, self.Nt): # for each truth bin
rsum = 0.0
for j in range(0, self.Nr): # for each reco bin
rsum += mig.val[i, j] # calculate the sum of all reco bins in the same truth bin
# rsum is now the total sum of events that has that particular truth bin
# now, for each reco bin in truth bin i, divide that row by the total number of events in it
# and multiply the response matrix by the efficiency
for j in range(0, self.Nr):
self.response_unfsyst[name].val[i, j] = mig.val[i, j]/rsum*eff.val[i] # P(r|t) = P(t, r)/P(t) = Mtr*eff(t)/sum_k=1^Nr Mtk
# keep the same efficiency (FIXME -- only uncomment for tests)
#self.response_unfsyst[name].val[i, j] = mig.val[i, j]/rsum*self.eff.val[i] # P(r|t) = P(t, r)/P(t) = Mtr*eff(t)/sum_k=1^Nr Mtk
self.response_unfsyst[name].err[i, j] = 0 # FIXME
self.unf_systematics.append(name)
self.prior_unfsyst[name] = prior
'''
Set a uniform prior.
'''
def setUniformPrior(self, capAtZero = True):
self.prior = "flat"
if capAtZero:
self.prior = "uniform"
self.fb = 0
'''
Transforms an array of doubles into a Theano-type array
So that it can be used in the model
'''
def asMat(self, x):
return np.asarray(x,dtype=theano.config.floatX)
'''
Create the model
and store it in self.model
the prior is the unfolded distribution
The idea is that the prior is sampled with toy experiments and the
reconstructed variable R_j = \sum_i truth_i * response_ij + background_j
is constrained to be the observed data
under such conditions, the posterior converges to the unfolded distribution
'''
def run(self, data):
self.data = H1D(data) # copy data
self.datasubbkg = self.data - self.bkg # For monitoring: data - bkg
# for the uniform prior, capping at zero
self.minT = 0
self.maxT = 10*np.amax(self.truth.val)
self.model = pm.Model() # create the model
with self.model: # all in this scope is in the model's context
self.var_alpha = theano.shared(value = 1.0, borrow = False)
self.var_data = theano.shared(value = self.data.val, borrow = False) # in case one wants to change data
# Define the prior
if self.prior == "gaussian":
#self.T = pm.Normal('Truth', mu = self.priorAttributes['mean'], sd = self.priorAttributes['sd'], shape = (self.Nt))
self.T = pm.DensityDist('Truth', logp = lambda val: -self.var_alpha*0.5*theano.tensor.sqr((val - self.priorAttributes['bias'])/self.priorAttributes['sd']).sum(), shape = (self.Nt), testval = self.truth.val)
elif self.prior == "entropy":
self.T = pm.DensityDist('Truth', logp = lambda val: -self.var_alpha*((val/val.sum())*theano.tensor.log(val/val.sum())).sum(), shape = (self.Nt), testval = self.truth.val)
elif self.prior == "curvature":
self.T = pm.DensityDist('Truth', logp = lambda val: -self.var_alpha*theano.tensor.sqr(theano.tensor.extra_ops.diff(theano.tensor.extra_ops.diff((val - self.fb*self.priorAttributes['bias'])))).sum(), shape = (self.Nt), testval = self.truth.val)
elif self.prior == "first derivative":
self.T = pm.DensityDist('Truth', logp = lambda val: -self.var_alpha*theano.tensor.abs_(theano.tensor.extra_ops.diff(theano.tensor.extra_ops.diff((val - self.fb*self.priorAttributes['bias'])/(self.truth.x_err*2))/np.diff(self.truth.x))/(2*theano.tensor.mean(theano.tensor.extra_ops.diff(val/(self.truth.x_err*2))/np.diff(self.truth.x)))).sum(), shape = (self.Nt), testval = self.truth.val)
elif self.prior == "uniform": # if using uniform, cap at zero
self.T = pm.Uniform('Truth', self.minT, self.maxT, shape = (self.Nt), testval = self.truth.val)
else: # if none of the names above matched, assume it is uniform
self.T = pm.Flat('Truth', shape = (self.Nt), testval = self.truth.val)
self.theta = {}
self.unf_theta = {}
for name in self.unf_systematics:
if self.prior_unfsyst[name] == "lognormal":
self.unf_theta[name] = pm.Lognormal('tu_'+name, mu = 0, sd = 0.69, testval = 1)
elif self.prior_unfsyst[name] == 'categorical':
self.unf_theta[name] = pm.Categorical('tu_'+name, p = [0.67, 0.33], testval = 0)
else:
self.unf_theta[name] = pm.Normal('tu_'+name, mu = 0, sd = 1, testval = 0)
#self.unf_theta[name] = pm.Laplace('tu_'+name, mu = 0, b = (2.0)**(-0.5), testval = 0)
for name in self.systematics:
self.theta[name] = pm.Normal('t_'+name, mu = 0, sd = 1, testval = 0)
self.var_bkg = theano.shared(value = self.asMat(self.bkg.val))
self.var_response = theano.shared(value = self.asMat(self.response.val))
self.R = theano.tensor.dot(self.T, self.var_response)
self.var_response_unfsyst = {}
self.var_response_alt_unfsyst = {}
self.var_bkg_unfsyst = {}
for name in self.unf_systematics:
self.var_response_unfsyst[name] = theano.shared(value = self.asMat(self.response_unfsyst[name].val - self.response.val))
self.var_response_alt_unfsyst[name] = theano.shared(value = self.asMat(self.response_unfsyst[name].val))
self.var_bkg_unfsyst[name] = theano.shared(value = self.asMat(self.bkg_unfsyst[name].val - self.bkg.val))
self.var_bkg_syst = {}
self.var_reco_syst = {}
self.R_syst = {}
for name in self.systematics:
# get background constribution
self.var_bkg_syst[name] = theano.shared(value = self.asMat(self.bkg_syst[name].val))
# calculate differential impact in the reco result
self.var_reco_syst[name] = theano.shared(value = self.asMat(self.reco_syst[name].val))
self.R_syst[name] = self.var_reco_syst[name] + self.var_bkg_syst[name]
# reco result including systematics:
self.R_full = 0
for name in self.unf_systematics:
# add it to the total reco result
if self.prior_unfsyst[name] == 'lognormal':
self.R_full += theano.tensor.dot((1 - self.unf_theta[name])*self.T, self.var_response_unfsyst[name])
self.R_full += (1 - self.unf_theta[name])*self.var_bkg_unfsyst[name]
elif self.prior_unfsyst[name] == 'categorical':
self.R_full += theano.tensor.dot(1.0*self.unf_theta[name]*self.T, self.var_response_alt_unfsyst[name])
self.R_full += self.unf_theta[name]*self.var_bkg_unfsyst[name]
self.R = theano.tensor.dot(self.R, (1.0 - self.unf_theta[name]))
else:
self.R_full += theano.tensor.dot(self.unf_theta[name]*self.T, self.var_response_unfsyst[name])
self.R_full += self.unf_theta[name]*self.var_bkg_unfsyst[name]
for name in self.systematics:
# add it to the total reco result
self.R_full += self.theta[name]*self.R_syst[name]
self.R_full += self.R
# cut-off at 0 to create efficiency boundaries
#self.R_full = theano.tensor.maximum(self.R_full, 0)
self.R_full += self.var_bkg
self.U = pm.Poisson('U', mu = self.R_full, observed = self.var_data, shape = (self.Nr, 1))
#self.U = pm.Normal('U', mu = self.R_full, sd = theano.tensor.sqrt(self.R_full), observed = self.var_data, shape = (self.Nr, 1))
if self.constrainArea:
self.Norm = pm.Poisson('Norm', mu = self.T.sum()*self.ave_eff + self.tot_bkg, observed = self.var_data.sum(), shape = (1))
# define symbolic negative log-likelihood
if doSymbolicMode:
self.symb_data = []
self.symb_T = []
self.symb_theta = {}
self.symb_R = [0 for k in range(self.Nr)]
for i in range(self.Nr):
self.symb_data.append(sympy.symbols('data_%d' % i))
for i in range(self.Nt):
self.symb_T.append(sympy.symbols('T_%d' % i))
for name in self.systematics:
self.symb_theta[name] = sympy.symbols('theta_%s' % name)
for name in self.unf_systematics:
self.symb_theta[name] = sympy.symbols('theta_%s' % name)
for i in range(self.Nr):
for k in range(self.Nt):
self.symb_R[i] += self.symb_T[k] * self.response.val[k, i]
self.symb_R[i] += self.bkg.val[i]
for name in self.systematics:
self.symb_R[i] += self.symb_theta[name]*(self.reco_syst[name].val[i] + self.bkg_syst[name].val[i])
for name in self.unf_systematics:
if self.prior_unfsyst[name] == 'lognormal':
self.symb_R[i] += (1 - self.symb_theta[name])*(self.bkg_unfsyst[name].val[i] - self.bkg.val[i])
else:
self.symb_R[i] += self.symb_theta[name]*(self.bkg_unfsyst[name].val[i] - self.bkg.val[i])
for k in range(self.Nt):
if self.prior_unfsyst[name] == 'lognormal':
self.symb_R[i] += (1 - self.symb_theta[name])*self.symb_T[k]*(self.response_unfsyst[name].val[k, i] - self.response.val[k, i])
else:
self.symb_R[i] += self.symb_theta[name]*self.symb_T[k]*(self.response_unfsyst[name].val[k, i] - self.response.val[k, i])
      self.dummy = sympy.symbols('dummy')
      self.nll = 0  # accumulate the symbolic -ln(L) term by term below
for i in range(self.Nr):
self.nll += -self.symb_data[i]*sympy.log(self.symb_R[i]) + self.symb_R[i] + sympy.summation(sympy.log(self.dummy), (self.dummy, 1, self.symb_data[i]))
for name in self.systematics:
self.nll += 0.5*self.symb_theta[name]**2 + 0.5*np.log(2*np.pi)
for name in self.unf_systematics:
if self.prior_unfsyst[name] == 'lognormal':
mu = 0
sd = 0.69
tau = sd**(-2.0)
          self.nll += 0.5*tau*(sympy.log(self.symb_theta[name]) - mu)**2 - 0.5*np.log(tau) + 0.5*np.log(2*np.pi) + sympy.log(self.symb_theta[name])
else:
self.nll += 0.5*self.symb_theta[name]**2 + 0.5*np.log(2*np.pi)
print("Defined -ln(L) symbolically as:")
print(self.nll)
'''
Make theano graph used for calculation.
'''
def graph(self, fname = "graph.png"):
from theano.printing import pydotprint
pydotprint(self.model.logpt, outfile=fname, var_with_name_simple=True)
'''
import matplotlib.pyplot as plt
import networkx as nx
fig = plt.figure(figsize=(10,10))
graph = nx.DiGraph()
variables = self.model.named_vars.values()
for var in variables:
graph.add_node(var)
dist = var.distribution
for param_name in getattr(dist, "vars", []):
param_value = getattr(dist, param_name)
owner = getattr(param_value, "owner", None)
if param_value in variables:
graph.add_edge(param_value, var)
elif owner is not None:
parents = _get_all_parents(param_value)
for parent in parents:
if parent in variables:
graph.add_edge(parent, var)
pos = nx.fruchterman_reingold_layout(graph)
nx.draw_networkx(graph, pos, arrows=True, node_size=1000, node_color='w', font_size=8)
plt.axis('off')
plt.savefig(fname)
plt.close()
'''
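  # Usage sketch (hypothetical: assumes `u` is a fully constructed
  # instance of this class whose PyMC3 model has already been built):
  #   u.graph("model_graph.png")   # dumps the Theano graph of log p to a PNG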
'''
Calculate bias in each bin.
'''
def getBiasPerBinFromMAP(self, N, bkg = None, mig = None, eff = None):
    if bkg is None: bkg = self.bkg
    if mig is None: mig = self.mig
    if eff is None: eff = self.eff
truth = mig.project('x')/eff
fitted = np.zeros((N, len(self.truth.val)))
bias = np.zeros(len(self.truth.val))
import sys
for k in range(0, N):
if k % 100 == 0:
print("getBiasFromMAP: Throwing toy experiment {0}/{1}\r".format(k, N))
sys.stdout.flush()
pseudo_data = getDataFromModel(bkg, mig, eff)
self.setData(pseudo_data)
res = pm.find_MAP(model = self.model, disp = False)
if not 'Truth' in res and self.prior == "uniform":
res["Truth"] = (self.maxT - self.minT) * np.exp(res["Truth_interval_"])/(1.0 + np.exp(res["Truth_interval_"])) + self.minT
fitted[k, :] = res["Truth"]
bias = np.mean(fitted, axis = 0)
bias_std = np.std(fitted - bias, axis = 0, ddof = 1)
plt_bias = H1D(truth)
plt_bias.val = bias
plt_bias.err = np.power(bias_std, 2)
plt_bias.err_up = np.power(bias_std, 2)
plt_bias.err_dw = np.power(bias_std, 2)
return plt_bias
'''
Calculate the sum of the bias using only the expected values.
'''
def getBiasFromMAP(self, N, bkg = None, mig = None, eff = None):
    if bkg is None: bkg = self.bkg
    if mig is None: mig = self.mig
    if eff is None: eff = self.eff
truth = mig.project('x')/eff
fitted = np.zeros((N, len(self.truth.val)))
bias = np.zeros(len(self.truth.val))
bias_norm = np.zeros(N)
import sys
for k in range(0, N):
if k % 100 == 0:
print("getBiasFromMAP: Throwing toy experiment {0}/{1}\r".format(k, N))
sys.stdout.flush()
pseudo_data = getDataFromModel(bkg, mig, eff)
self.setData(pseudo_data)
#self.run(pseudo_data)
with self.model:
res = pm.find_MAP(disp = False)
      if 'Truth' not in res and self.prior in ("uniform", "flat"):
res["Truth"] = (self.maxT - self.minT) * np.exp(res["Truth_interval_"])/(1.0 + np.exp(res["Truth_interval_"])) + self.minT
fitted[k, :] = res["Truth"] - truth.val
bias_norm[k] = np.sum(res['Truth'] - truth.val)
    print("")  # newline after the \r progress updates
# systematic bias
self.setData(mig.project('y') + bkg)
with self.model:
res = pm.find_MAP(disp = False)
bias_syst = np.mean(np.abs(res["Truth"] - truth.val))
bias = np.mean(fitted, axis = 0)
bias_std = np.std(fitted, axis = 0, ddof = 1)
bias_norm_mean = np.mean(bias_norm)
bias_norm_std = np.std(bias_norm, ddof = 1)
#print("getBiasFromMAP with alpha = ", self.var_alpha.get_value(), " N = ", N, ", mean, std = ", bias, bias_std)
bias_binsum = np.mean(np.abs(bias))
bias_std_binsum = np.mean(bias_std)
bias_chi2 = np.mean(np.power(bias/bias_std, 2))
return [bias_binsum, bias_std_binsum, bias_chi2, bias_norm_mean, bias_norm_std, bias_syst]
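  # Example (sketch, hypothetical instance name `u`): the summary list
  # returned above can be unpacked as
  #   b, b_std, b_chi2, b_norm, b_norm_std, b_syst = u.getBiasFromMAP(100)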
'''
Scan alpha values to minimise bias^2 over variance.
'''
def scanAlpha(self, bkg, mig, eff, N = 1000, rangeAlpha = np.arange(0.0, 10.0, 1.0), fname = "scanAlpha.png", fname_chi2 = "scanAlpha_chi2.png", fname_norm = "scanAlpha_norm.png"):
bkp_alpha = self.var_alpha.get_value()
bias = np.zeros(len(rangeAlpha))
bias_std = np.zeros(len(rangeAlpha))
bias_chi2 = np.zeros(len(rangeAlpha))
bias_norm = np.zeros(len(rangeAlpha))
bias_norm_std = np.zeros(len(rangeAlpha))
bias_syst = np.zeros(len(rangeAlpha))
minBias = 1e10
bestAlpha = 0
bestChi2 = 0
bestI = 0
import sys
for i in range(0, len(rangeAlpha)):
print("scanAlpha: parameter = ", rangeAlpha[i], " / ", rangeAlpha[-1])
sys.stdout.flush()
self.setAlpha(rangeAlpha[i])
bias[i], bias_std[i], bias_chi2[i], bias_norm[i], bias_norm_std[i], bias_syst[i] = self.getBiasFromMAP(N, bkg, mig, eff) # only take mean values for speed
print(" -- --> scanAlpha: parameter = ", rangeAlpha[i], " / ", rangeAlpha[-1], " with chi2 = ", bias_chi2[i], ", mean and std = ", bias[i], bias_std[i])
if np.abs(bias_chi2[i] - 0.5) < minBias:
minBias = np.abs(bias_chi2[i] - 0.5)
bestChi2 = bias_chi2[i]
bestAlpha = rangeAlpha[i]
bestI = i
fig = plt.figure(figsize=(10, 10))
plt_bias = H1D(bias)
plt_bias.val = bias
plt_bias.err = np.zeros(len(rangeAlpha))
plt_bias.err_up = np.zeros(len(rangeAlpha))
plt_bias.err_dw = np.zeros(len(rangeAlpha))
plt_bias.x = rangeAlpha
plt_bias.x_err = np.zeros(len(rangeAlpha))
plt_bias_e = H1D(bias)
plt_bias_e.val = bias_std
plt_bias_e.err = np.zeros(len(rangeAlpha))
plt_bias_e.err_up = np.zeros(len(rangeAlpha))
plt_bias_e.err_dw = np.zeros(len(rangeAlpha))
plt_bias_e.x = rangeAlpha
plt_bias_e.x_err = np.zeros(len(rangeAlpha))
plt_bias_syst = H1D(bias)
plt_bias_syst.val = bias_syst
plt_bias_syst.err = np.zeros(len(rangeAlpha))
plt_bias_syst.err_up = np.zeros(len(rangeAlpha))
plt_bias_syst.err_dw = np.zeros(len(rangeAlpha))
plt_bias_syst.x = rangeAlpha
plt_bias_syst.x_err = np.zeros(len(rangeAlpha))
#plotH1DLines({r"$E_{\mathrm{bins}}[|E_{\mathrm{toys}}[\mathrm{bias}]|]$": plt_bias, r"$E_{\mathrm{bins}}[\sqrt{\mathrm{Var}_{\mathrm{toys}}[\mathrm{bias}]}]$": plt_bias_e, r"$E_{\mathrm{bins}}[|\mathrm{only \;\; syst. \;\; bias}|]$": plt_bias_syst}, r"$\alpha$", "Bias", "", fname)
plotH1DLines({r"$E_{\mathrm{bins}}[|E_{\mathrm{toys}}[\mathrm{bias}]|]$": plt_bias, r"$E_{\mathrm{bins}}[\sqrt{\mathrm{Var}_{\mathrm{toys}}[\mathrm{bias}]}]$": plt_bias_e}, r"$\alpha$", "Bias", "", fname)
plt_bias_norm = H1D(bias)
plt_bias_norm.val = bias_norm
plt_bias_norm.err = np.zeros(len(rangeAlpha))
plt_bias_norm.err_up = np.zeros(len(rangeAlpha))
plt_bias_norm.err_dw = np.zeros(len(rangeAlpha))
plt_bias_norm.x = rangeAlpha
plt_bias_norm.x_err = np.zeros(len(rangeAlpha))
plt_bias_norm_e = H1D(bias)
plt_bias_norm_e.val = bias_norm_std
plt_bias_norm_e.err = np.zeros(len(rangeAlpha))
plt_bias_norm_e.err_up = np.zeros(len(rangeAlpha))
plt_bias_norm_e.err_dw = np.zeros(len(rangeAlpha))
plt_bias_norm_e.x = rangeAlpha
plt_bias_norm_e.x_err = np.zeros(len(rangeAlpha))
plotH1DLines({r"$E_{\mathrm{toys}}[\mathrm{norm. \;\; bias}]$": plt_bias_norm, r"$\sqrt{\mathrm{Var}_{\mathrm{toys}}[\mathrm{norm. \;\; bias}]}$": plt_bias_norm_e}, r"$\alpha$", "Normalisation bias", "", fname_norm)
plt_bias_chi2 = H1D(bias_chi2)
plt_bias_chi2.val = bias_chi2
plt_bias_chi2.err = np.ones(len(rangeAlpha))*np.sqrt(float(len(self.truth.val))/float(N)) # error in chi^2 considering errors in the mean of std/sqrt(N)
plt_bias_chi2.err_up = np.ones(len(rangeAlpha))*np.sqrt(float(len(self.truth.val))/float(N)) # error in chi^2 considering errors in the mean of std/sqrt(N)
plt_bias_chi2.err_dw = np.ones(len(rangeAlpha))*np.sqrt(float(len(self.truth.val))/float(N)) # error in chi^2 considering errors in the mean of std/sqrt(N)
plt_bias_chi2.x = rangeAlpha
plt_bias_chi2.x_err = np.zeros(len(rangeAlpha))
plt_cte = H1D(plt_bias_chi2)
plt_cte.val = 0.5*np.ones(len(rangeAlpha))
plt_cte.err = np.zeros(len(rangeAlpha))
plt_cte.err_up = np.zeros(len(rangeAlpha))
plt_cte.err_dw = np.zeros(len(rangeAlpha))
plotH1DLines({r"$E_{\mathrm{bins}}[E_{\mathrm{toys}}[\mathrm{bias}]^2/\mathrm{Var}_{\mathrm{toys}}[\mathrm{bias}]]$": plt_bias_chi2, "0.5": plt_cte}, r"$\alpha$", r"Bias $\mathrm{mean}^2/\mathrm{variance}$", "", fname_chi2)
self.setAlpha(bkp_alpha)
return [bestAlpha, bestChi2, bias[bestI], bias_std[bestI], bias_norm[bestI], bias_norm_std[bestI]]
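  # Usage sketch (hypothetical names): scan the regularisation strength
  # and keep the alpha whose bias^2/variance is closest to 0.5:
  #   bestAlpha, bestChi2, b, b_std, bn, bn_std = u.scanAlpha(
  #       bkg, mig, eff, N = 200, rangeAlpha = np.arange(0.0, 5.0, 0.5))
  #   u.setAlpha(bestAlpha)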
'''
Set value of alpha.
'''
def setAlpha(self, alpha):
with self.model:
self.var_alpha.set_value(float(alpha), borrow = False)
'''
Update input data.
'''
def setData(self, data):
self.data = H1D(data)
with self.model:
self.var_data.set_value(self.data.val, borrow = False)
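  # Example (sketch): inject a pseudo-dataset before refitting, e.g.
  #   u.setData(getDataFromModel(bkg, mig, eff))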
'''
  Sample the posterior with N MCMC iterations.
  Saves the samples in self.trace, and the unfolded distribution
  mean, median and mode in self.hunf, self.hunf_median and
  self.hunf_mode respectively, with the marginal variances in their err fields.
'''
def sample(self, N = 50000):
self.N = N
with self.model:
#start = pm.find_MAP()
#step = pm.NUTS(state = start)
self.trace = pm.sample(N) #, step, start = start)
self.N = self.trace.Truth.shape[0]
#print("Number of truth samples:", self.N)
#pm.summary(self.trace)
self.hnp = H1D(np.zeros(len(self.systematics)))
self.hnpu = H1D(np.zeros(len(self.unf_systematics)))
self.hunf = H1D(self.truth)
self.hunf_median = H1D(self.truth)
self.hnp_median = H1D(np.zeros(len(self.systematics)))
self.hnpu_median = H1D(np.zeros(len(self.unf_systematics)))
self.hnp_median.x = [""]*len(self.systematics)
self.hnpu_median.x = [""]*len(self.unf_systematics)
self.hunf_mode = H1D(self.truth)
self.hnp_mode = H1D(np.zeros(len(self.systematics)))
self.hnpu_mode = H1D(np.zeros(len(self.unf_systematics)))
self.hnp_mode.x = [""]*len(self.systematics)
self.hnpu_mode.x = [""]*len(self.unf_systematics)
self.hunf_smode = H1D(self.truth)
self.hnp_smode = H1D(np.zeros(len(self.systematics)))
self.hnpu_smode = H1D(np.zeros(len(self.unf_systematics)))
self.hnp_smode.x = [""]*len(self.systematics)
self.hnpu_smode.x = [""]*len(self.unf_systematics)
x0 = np.zeros(self.Nt+len(self.systematics)+len(self.unf_systematics))
for i in range(0, self.Nt):
m = np.mean(self.trace.Truth[:, i])
s = np.std(self.trace.Truth[:, i], ddof = 1)
x0[i] = m
self.hunf.val[i] = m
self.hunf.err[i] = s**2
self.hunf.err_up[i] = s**2
self.hunf.err_dw[i] = s**2
self.hunf_median.val[i] = np.median(self.trace.Truth[:, i])
self.hunf_median.err[i] = s**2
self.hunf_median.err_up[i] = s**2
self.hunf_median.err_dw[i] = s**2
self.hnp.x = [""]*len(self.systematics)
self.hnpu.x = [""]*len(self.unf_systematics)
for k in range(0, len(self.systematics)):
m = np.mean(self.trace['t_'+self.systematics[k]])
s = np.std(self.trace['t_'+self.systematics[k]], ddof = 1)
x0[self.Nt+k] = m
self.hnp.val[k] = m
self.hnp.err[k] = s**2
self.hnp.err_up[k] = s**2
self.hnp.err_dw[k] = s**2
self.hnp.x[k] = self.systematics[k]
self.hnp.x_err[k] = 1
self.hnp_median.val[k] = np.median(self.trace['t_'+self.systematics[k]])
self.hnp_median.err[k] = s**2
self.hnp_median.err_up[k] = s**2
self.hnp_median.err_dw[k] = s**2
self.hnp_median.x[k] = self.systematics[k]
self.hnp_median.x_err[k] = 1
for k in range(0, len(self.unf_systematics)):
m = np.mean(self.trace['tu_'+self.unf_systematics[k]])
s = np.std(self.trace['tu_'+self.unf_systematics[k]], ddof = 1)
x0[self.Nt+len(self.systematics)+k] = m
self.hnpu.val[k] = m
self.hnpu.err[k] = s**2
self.hnpu.err_up[k] = s**2
self.hnpu.err_dw[k] = s**2
self.hnpu.x[k] = self.unf_systematics[k]
self.hnpu.x_err[k] = 1
self.hnpu_median.val[k] = np.median(self.trace['tu_'+self.unf_systematics[k]])
self.hnpu_median.err[k] = s**2
self.hnpu_median.err_up[k] = s**2
self.hnpu_median.err_dw[k] = s**2
self.hnpu_median.x[k] = self.unf_systematics[k]
self.hnpu_median.x_err[k] = 1
# get mode
mode = self.getPosteriorMode()
for k in range(0, self.Nt):
self.hunf_mode.val[k] = mode.x[k]
#self.hunf_mode.err[k] = mode.hess_inv.todense()[k,k]
self.hunf_mode.err[k] = mode.errMode[k]**2
self.hunf_mode.err_up[k] = mode.errModeUp[k]**2
self.hunf_mode.err_dw[k] = mode.errModeDw[k]**2
self.hunf_smode.val[k] = mode.symb_x[k]
self.hunf_smode.err[k] = mode.symb_errMode[k]**2
self.hunf_smode.err_up[k] = mode.symb_errModeUp[k]**2
self.hunf_smode.err_dw[k] = mode.symb_errModeDw[k]**2
for k in range(0, len(self.systematics)):
self.hnp_mode.val[k] = mode.x[self.Nt+k]
#self.hnp_mode.err[k] = mode.hess_inv.todense()[self.Nt+k, self.Nt+k]
self.hnp_mode.err[k] = mode.errMode[self.Nt+k]**2
self.hnp_mode.err_up[k] = mode.errModeUp[self.Nt+k]**2
self.hnp_mode.err_dw[k] = mode.errModeDw[self.Nt+k]**2
self.hnp_mode.x[k] = self.systematics[k]
self.hnp_mode.x_err[k] = 1
self.hnp_smode.val[k] = mode.symb_x[self.Nt+k]
self.hnp_smode.err[k] = mode.symb_errMode[self.Nt+k]**2
self.hnp_smode.err_up[k] = mode.symb_errModeUp[self.Nt+k]**2
self.hnp_smode.err_dw[k] = mode.symb_errModeDw[self.Nt+k]**2
self.hnp_smode.x[k] = self.systematics[k]
self.hnp_smode.x_err[k] = 1
for k in range(0, len(self.unf_systematics)):
self.hnpu_mode.val[k] = mode.x[self.Nt+len(self.systematics)+k]
#self.hnpu_mode.err[k] = mode.hess_inv.todense()[self.Nt+len(self.systematics)+k, self.Nt+len(self.systematics)+k]
self.hnpu_mode.err[k] = mode.errMode[self.Nt+len(self.systematics)+k]**2
self.hnpu_mode.err_up[k] = mode.errModeUp[self.Nt+len(self.systematics)+k]**2
self.hnpu_mode.err_dw[k] = mode.errModeDw[self.Nt+len(self.systematics)+k]**2
self.hnpu_mode.x[k] = self.unf_systematics[k]
self.hnpu_mode.x_err[k] = 1
self.hnpu_smode.val[k] = mode.symb_x[self.Nt+len(self.systematics)+k]
self.hnpu_smode.err[k] = mode.symb_errMode[self.Nt+len(self.systematics)+k]**2
self.hnpu_smode.err_up[k] = mode.symb_errModeUp[self.Nt+len(self.systematics)+k]**2
self.hnpu_smode.err_dw[k] = mode.symb_errModeDw[self.Nt+len(self.systematics)+k]**2
self.hnpu_smode.x[k] = self.unf_systematics[k]
self.hnpu_smode.x_err[k] = 1
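  # Typical analysis flow (sketch, hypothetical instance `u`):
  #   u.sample(N = 20000)             # run the MCMC and fill summary histograms
  #   u.plotUnfolded("unfolded.png")  # compare truth, marginal mean and modes
  #   u.plotMarginal("marginals.png")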
'''
  Estimate the posterior mode from a Gaussian kernel density estimate (KDE).
'''
def getPosteriorMode(self):
d = self.Nt + len(self.systematics) + len(self.unf_systematics)
r = self.N
value = np.zeros( (d, r) )
S = np.zeros( (d, 1) )
dS = np.zeros( (d, 1) )
for i in range(0, self.Nt):
value[i, :] = copy.deepcopy(self.trace.Truth[:, i])
m = np.mean(self.trace.Truth[:, i])
s = np.std(self.trace.Truth[:, i])
S[i, 0] = m
dS[i, 0] = s
for i in range(0, len(self.systematics)):
value[self.Nt + i, :] = copy.deepcopy(self.trace['t_'+self.systematics[i]][:])
m = np.mean(self.trace['t_'+self.systematics[i]])
s = np.std(self.trace['t_'+self.systematics[i]])
S[self.Nt + i, 0] = m
dS[self.Nt + i, 0] = s
for i in range(0, len(self.unf_systematics)):
value[self.Nt +len(self.systematics) + i, :] = copy.deepcopy(self.trace['tu_'+self.unf_systematics[i]][:])
m = np.mean(self.trace['tu_'+self.unf_systematics[i]])
s = np.std(self.trace['tu_'+self.unf_systematics[i]])
S[self.Nt + len(self.systematics) + i, 0] = m
dS[self.Nt +len(self.systematics) + i, 0] = s
pdf = stats.gaussian_kde(value)
    def mpdf(x, args):
      pdf = args['pdf']
      p = np.asarray(pdf(x))
      # vectorised -ln(pdf): where the KDE vanishes, assign a large
      # penalty so the minimiser avoids zero-density regions
      out = np.full_like(p, 1e20, dtype = np.float64)
      mask = p > 0
      out[mask] = -np.log(p[mask])
      return out
bounds = []
for i in range(0, self.Nt+len(self.systematics)+len(self.unf_systematics)):
bounds.append((S[i, 0]-5*dS[i,0], S[i,0]+5*dS[i,0]))
    for i in range(len(self.unf_systematics)):
      if self.prior_unfsyst[self.unf_systematics[i]] == 'lognormal':
        j = self.Nt + len(self.systematics) + i
        bounds[j] = (1e-6, S[j, 0] + 3*dS[j, 0])
args = {'pdf': pdf}
print("Start minimization with %s = %f" % (str(S), mpdf(S, args)))
res = optimize.minimize(mpdf, S, args = args, bounds = bounds, method='L-BFGS-B', options={'maxiter': 100, 'disp': True})
print(res)
# get 68% interval
# T is a list of linear spaces around the mode +/- 5 sigma
# ie: T = [np.linspace(mode1 - 5*sigma1, mode1 + 5*sigma1, Ngrid), np.linspace(mode2 - 5*sigma2, mode2 + 5*sigma2, Ngrid), ...]
T = []
Ndims = self.Nt+len(self.systematics)+len(self.unf_systematics)
Ngrid = 1000*Ndims
for i in range(0, self.Nt+len(self.systematics)+len(self.unf_systematics)):
      if i >= self.Nt+len(self.systematics) and self.prior_unfsyst[self.unf_systematics[i - self.Nt - len(self.systematics)]] == 'lognormal':
T.append(np.linspace(1e-6, res.x[i]+5*dS[i,0], Ngrid))
else:
T.append(np.linspace(res.x[i]-5*dS[i,0], res.x[i]+5*dS[i,0], Ngrid))
# build mesh with coordinates
meshT = np.meshgrid(T)
# meshTpos[i, k] has the i-th coordinate for mesh point k
meshTpos = np.reshape(np.vstack(map(np.ravel, meshT)), (Ndims, -1))
# get the -ln(pdf) value for each mesh point k
meshPdf = mpdf(meshTpos, args)
# get index permutation that would order the pdf values list in increasing order of -ln(pdf)
pdfPerm = meshPdf.argsort()
# get pdf values under decreasing order (since indices above are in increasing order of -ln(pdf) )
orderedPdf = np.exp(-meshPdf[pdfPerm])
# get pdf normalisation to normalise it to 1
pdfNorm = np.sum(orderedPdf)
# get decreasingly ordered pdf values so that total integral is 1
orderedPdfNorm = np.asarray([x/pdfNorm for x in orderedPdf])
# perform cumulative sum to get cdf values, summing up most probable values first
orderedCdf = np.cumsum(orderedPdfNorm)
# get first index of the cdf, where the cumulative probability is larger than 68%
boundaryIdx = np.argwhere(orderedCdf > 0.68)[0][0]
# transpose mesh (so now i-th coordinate for mesh point k is in index (k, i) ) and get points ordered by most probable mesh point to least probable
orderedMesh = (meshTpos.T)[pdfPerm]
# get only mesh points up to 68% boundary under the ordering principle above
confidenceSurface = orderedMesh[0:boundaryIdx, :]
# get maximum and minimum values for each coordinate i
errMin = [np.amin(confidenceSurface[:, i]) for i in range(0, Ndims)]
errMax = [np.amax(confidenceSurface[:, i]) for i in range(0, Ndims)]
# get mode errors by taking differences between values
modeErr = [0.5*(errMax[k] - errMin[k]) for k in range(0, Ndims)]
modeErrUp = [errMax[k] - res.x[k] for k in range(0, Ndims)]
modeErrDw = [res.x[k] - errMin[k] for k in range(0, Ndims)]
# add this to result structure
res.errMode = modeErr
res.errModeUp = modeErrUp
res.errModeDw = modeErrDw
res.symb_x = res.x
res.symb_errMode = modeErr
res.symb_errModeUp = modeErrUp
res.symb_errModeDw = modeErrDw
print(res)
# now doing it symbolically
if doSymbolicMode:
dataTuples = []
for i in range(self.Nr):
dataTuples.append( (self.symb_data[i], int(self.data.val[i])) )
nllWithData = self.nll.subs(dataTuples)
print("-ln(L) with data substituted in:")
print(nllWithData)
xx = [self.symb_T[k] for k in range(self.Nt)] + [self.symb_theta[k] for k in self.systematics] + [self.symb_theta[k] for k in self.unf_systematics]
nllWithDataHandle = sympy.utilities.lambdify(xx, nllWithData, modules = 'numpy')
def nllWithDataProxy(zz):
return nllWithDataHandle(*zz)
jac = [nllWithData.diff(s) for s in xx]
jacHandle = [sympy.utilities.lambdify(xx, jf, modules = 'numpy') for jf in jac]
def jacProxy(zz):
return np.array([jfn(*zz) for jfn in jacHandle])
symb_res = optimize.minimize(nllWithDataProxy, S, bounds = bounds, jac = jacProxy, method='L-BFGS-B', options={'maxiter': 100, 'disp': True})
print("Symbolic minimisation:")
print(symb_res)
res.symb_x = symb_res.x
# get 68% interval
# T is a list of linear spaces around the mode +/- 2 sigma
# ie: T = [np.linspace(mode1 - 2*sigma1, mode1 + 2*sigma1, Ngrid), np.linspace(mode2 - 2*sigma2, mode2 + 2*sigma2, Ngrid), ...]
symb_T = []
for i in range(0, self.Nt+len(self.systematics)+len(self.unf_systematics)):
        if i >= self.Nt+len(self.systematics) and self.prior_unfsyst[self.unf_systematics[i - self.Nt - len(self.systematics)]] == 'lognormal':
          symb_T.append(np.linspace(1e-6, res.symb_x[i]+2*dS[i,0], Ngrid))
        else:
          symb_T.append(np.linspace(res.symb_x[i]-2*dS[i,0], res.symb_x[i]+2*dS[i,0], Ngrid))
# build mesh with coordinates
symb_meshT = np.meshgrid(symb_T)
# meshTpos[i, k] has the i-th coordinate for mesh point k
symb_meshTpos = np.reshape(np.vstack(map(np.ravel, symb_meshT)), (Ndims, -1))
# get the -ln(pdf) value for each mesh point k
symb_meshPdf = nllWithDataProxy(symb_meshTpos)
# get index permutation that would order the pdf values list in increasing order of -ln(pdf)
symb_pdfPerm = symb_meshPdf.argsort()
# get pdf values under decreasing order (since indices above are in increasing order of -ln(pdf) )
symb_orderedPdf = np.exp(-symb_meshPdf[symb_pdfPerm])
# get pdf normalisation to normalise it to 1
symb_pdfNorm = np.sum(symb_orderedPdf)
# get decreasingly ordered pdf values so that total integral is 1
symb_orderedPdfNorm = np.asarray([x/symb_pdfNorm for x in symb_orderedPdf])
# perform cumulative sum to get cdf values, summing up most probable values first
symb_orderedCdf = np.cumsum(symb_orderedPdfNorm)
# get first index of the cdf, where the cumulative probability is larger than 68%
symb_boundaryIdx = np.argwhere(symb_orderedCdf > 0.68)[0][0]
# transpose mesh (so now i-th coordinate for mesh point k is in index (k, i) ) and get points ordered by most probable mesh point to least probable
symb_orderedMesh = (symb_meshTpos.T)[symb_pdfPerm]
# get only mesh points up to 68% boundary under the ordering principle above
symb_confidenceSurface = symb_orderedMesh[0:symb_boundaryIdx, :]
# get maximum and minimum values for each coordinate i
symb_errMin = [np.amin(symb_confidenceSurface[:, i]) for i in range(0, Ndims)]
symb_errMax = [np.amax(symb_confidenceSurface[:, i]) for i in range(0, Ndims)]
# get mode errors by taking differences between values
symb_modeErr = [0.5*(symb_errMax[k] - symb_errMin[k]) for k in range(0, Ndims)]
symb_modeErrUp = [symb_errMax[k] - res.symb_x[k] for k in range(0, Ndims)]
symb_modeErrDw = [res.symb_x[k] - symb_errMin[k] for k in range(0, Ndims)]
# add this to result structure
res.symb_errMode = symb_modeErr
res.symb_errModeUp = symb_modeErrUp
res.symb_errModeDw = symb_modeErrDw
print(res)
return res
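  # The KDE mode-finding idea above in isolation (illustrative sketch only;
  # the names below are not part of this class):
  #   import numpy as np
  #   from scipy import stats, optimize
  #   draws = np.random.multivariate_normal([1.0, 2.0], np.eye(2), 5000).T  # (dims, N)
  #   kde = stats.gaussian_kde(draws)                  # expects shape (dims, N)
  #   nll = lambda z: float(-np.log(max(kde(z)[0], 1e-300)))
  #   res = optimize.minimize(nll, draws.mean(axis = 1), method = 'L-BFGS-B')
  #   # res.x then approximates the joint posterior mode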
'''
  Plot the marginal distribution of each truth bin,
  integrating out all other bins and nuisance parameters.
'''
def plotMarginal(self, fname):
fig = plt.figure(figsize=(10, 20))
m = np.mean(self.trace.Truth) + 3*np.std(self.trace.Truth)
for i in range(0, self.Nt):
ax = fig.add_subplot(self.Nt, 1, i+1, title='Truth')
sns.distplot(self.trace.Truth[:, i], kde = True, hist = True, label = "Truth bin %d" % i, ax = ax)
ax.set_title("Bin %d value" % i)
ax.set_ylabel("Probability")
ax.set_xlim([0, m])
ax.axvline(self.hunf_mode.val[i], linestyle = '--', linewidth = 1.5, color = 'r', label = 'Mode')
if doSymbolicMode:
ax.axvline(self.hunf_smode.val[i], linestyle = '-.', linewidth = 1.5, color = 'b', label = 'Mode (symbolic)')
ax.axvline(self.hunf.val[i], linestyle = ':', linewidth = 1.5, color = 'm', label = 'Marginal mean')
ax.axvline(self.hunf_median.val[i], linestyle = '-', linewidth = 1.5, color = 'k', label = 'Marginal median')
ax.legend()
plt.xlabel("Truth bin value")
plt.tight_layout()
plt.savefig("%s"%fname)
plt.close()
'''
  Plot the marginal distribution of a given detector nuisance parameter, integrating out everything else.
'''
def plotNPMarginal(self, syst, fname):
i = self.systematics.index(syst)
fig = plt.figure(figsize=(10, 10))
sns.distplot(self.trace['t_'+self.systematics[i]], kde = True, hist = True, label = self.systematics[i])
plt.axvline(self.hnp_mode.val[i], linestyle = '--', linewidth = 1.5, color = 'r', label = 'Mode')
if doSymbolicMode:
plt.axvline(self.hnp_smode.val[i], linestyle = '-.', linewidth = 1.5, color = 'b', label = 'Mode (symbolic)')
plt.axvline(self.hnp.val[i], linestyle = ':', linewidth = 1.5, color = 'm', label = 'Marginal mean')
plt.axvline(self.hnp_median.val[i], linestyle = '-', linewidth = 1.5, color = 'k', label = 'Marginal median')
plt.title(self.systematics[i])
plt.ylabel("Probability")
plt.xlim([-5, 5])
plt.xlabel("Nuisance parameter value")
plt.legend()
plt.tight_layout()
plt.savefig("%s"%fname)
plt.close()
'''
  Plot the marginal distribution of a given unfolding nuisance parameter, integrating out everything else.
'''
def plotNPUMarginal(self, syst, fname):
i = self.unf_systematics.index(syst)
fig = plt.figure(figsize=(10, 10))
sns.distplot(self.trace['tu_'+self.unf_systematics[i]], kde = True, hist = True, label = self.unf_systematics[i])
plt.axvline(self.hnpu_mode.val[i], linestyle = '--', linewidth = 1.5, color = 'r', label = 'Mode')
if doSymbolicMode:
plt.axvline(self.hnpu_smode.val[i], linestyle = '-.', linewidth = 1.5, color = 'b', label = 'Mode (symbolic)')
plt.axvline(self.hnpu.val[i], linestyle = ':', linewidth = 1.5, color = 'm', label = 'Marginal mean')
plt.axvline(self.hnpu_median.val[i], linestyle = '-', linewidth = 1.5, color = 'k', label = 'Marginal median')
plt.title(self.unf_systematics[i])
plt.ylabel("Probability")
if self.prior_unfsyst[syst] == 'lognormal':
plt.xlim([0, 8])
else:
plt.xlim([-5, 5])
plt.xlabel("Nuisance parameter value")
plt.legend()
plt.tight_layout()
plt.savefig("%s"%fname)
plt.close()
'''
Plot the marginal distributions as well as the scatter plots of bins pairwise.
'''
def plotPairs(self, fname):
fig = plt.figure(figsize=(10, 10))
sns.pairplot(pm.trace_to_dataframe(self.trace), kind="reg")
plt.tight_layout()
plt.savefig("%s"%fname)
plt.close()
'''
Plot the covariance matrix.
'''
def plotCov(self, fname):
fig = plt.figure(figsize=(10, 10))
plotH2D(np.cov(self.trace.Truth, rowvar = 0), "", "", "Covariance of unfolded bins", logz=False, fname = fname, fmt = "")
'''
Plot the Pearson correlation coefficients.
'''
def plotCorr(self, fname):
fig = plt.figure(figsize=(10, 10))
plotH2D(np.corrcoef(self.trace.Truth, rowvar = 0), "", "", "Correlation of unfolded bins", logz = False, fname = fname)
'''
Plot the Pearson correlation coefficients including the NPs.
'''
def plotCorrWithNP(self, fname):
tmp = np.zeros((self.Nt+len(self.systematics)+len(self.unf_systematics), self.N))
for i in range(0, self.Nt):
tmp[i, :] = self.trace.Truth[:, i]
for i in range(0, len(self.systematics)):
tmp[self.Nt+i, :] = self.trace['t_'+self.systematics[i]]
for i in range(0, len(self.unf_systematics)):
tmp[self.Nt+len(self.systematics)+i, :] = self.trace['tu_'+self.unf_systematics[i]]
tmplabel = ["%d" % i for i in range(0, self.Nt)] + self.systematics + self.unf_systematics
plotH2DWithText(np.corrcoef(tmp, rowvar = 1), tmplabel, "", "", "Correlation of posterior", fname)
'''
Plot skewness.
'''
def plotSkewness(self, fname):
fig = plt.figure(figsize=(10, 10))
sk = H1D(self.truth)
sk.val = stats.skew(self.trace.Truth, axis = 0, bias = False)
sk.err = np.zeros(len(sk.val))
sk.err_up = np.zeros(len(sk.val))
sk.err_dw = np.zeros(len(sk.val))
plotH1D(sk, "Unfolded bins", "Skewness", "Skewness of unfolded bins", logy = False, fname = fname)
'''
Plot nuisance parameter mode.
'''
def plotNP(self, fname):
fig = plt.figure(figsize=(10, 10))
plotH1DWithText(self.hnp_mode, "Nuisance parameter", "Nuisance parameter mode", fname)
'''
  Plot the unfolding nuisance parameter modes.
'''
def plotNPU(self, fname):
fig = plt.figure(figsize=(10, 10))
plotH1DWithText(self.hnpu_mode, "Nuisance parameter", "Nuisance parameter mode", fname)
'''
Plot kurtosis.
'''
def plotKurtosis(self, fname):
fig = plt.figure(figsize=(10, 10))
sk = H1D(self.truth)
sk.val = stats.kurtosis(self.trace.Truth, axis = 0, fisher = True, bias = False)
sk.err = np.zeros(len(sk.val))
sk.err_up = np.zeros(len(sk.val))
sk.err_dw = np.zeros(len(sk.val))
plotH1D(sk, "Unfolded bins", "Fisher kurtosis", "Kurtosis of unfolded bins", logy = False, fname = fname)
'''
  Plot the truth and the unfolded results (marginal mean, median and modes).
'''
def plotUnfolded(self, fname = "plotUnfolded.png"):
fig = plt.figure(figsize=(10, 10))
ymax = 0
for item in [self.truth, self.hunf_mode, self.hunf_smode, self.hunf]:
ma = np.amax(item.val)
ymax = np.amax([ymax, ma])
#plt.errorbar(self.data.x, self.data.val, self.data.err**0.5, self.data.x_err, fmt = 'bs', linewidth=2, label = "Pseudo-data", markersize=10)
#plt.errorbar(self.datasubbkg.x, self.datasubbkg.val, self.datasubbkg.err**0.5, self.datasubbkg.x_err, fmt = 'co', linewidth=2, label = "Background subtracted", markersize=10)
#plt.errorbar(self.recoWithoutFakes.x, self.recoWithoutFakes.val, self.recoWithoutFakes.err**0.5, self.recoWithoutFakes.x_err, fmt = 'mv', linewidth=2, label = "Expected signal (no fakes) distribution", markersize=5)
plt.errorbar(self.truth.x, self.truth.val, [self.truth.err_dw**0.5, self.truth.err_up**0.5], self.truth.x_err, fmt = 'g^', linewidth=2, label = "Truth", markersize=10)
plt.errorbar(self.hunf_median.x, self.hunf_median.val, [self.hunf_median.err_dw**0.5, self.hunf_median.err_up**0.5], self.hunf_median.x_err, fmt = 'k^', linewidth=2, label = "Marginal median", markersize = 5)
if doSymbolicMode:
plt.errorbar(self.hunf_smode.x, self.hunf_smode.val, [self.hunf_smode.err_dw**0.5, self.hunf_smode.err_up**0.5], self.hunf_smode.x_err, fmt = 'b^', linewidth=2, label = "Unfolded mode (symbolic)", markersize = 5)
plt.errorbar(self.hunf.x, self.hunf.val, [self.hunf.err_dw**0.5, self.hunf.err_up**0.5], self.hunf.x_err, fmt = 'rv', linewidth=2, label = "Marginal mean", markersize=5)
plt.errorbar(self.hunf_mode.x, self.hunf_mode.val, [self.hunf_mode.err_dw**0.5, self.hunf_mode.err_up**0.5], self.hunf_mode.x_err, fmt = 'm^', linewidth=2, label = "Unfolded mode", markersize = 5)
plt.ylim([0, ymax*1.2])
plt.legend()
plt.ylabel("Events")
plt.xlabel("Observable")
plt.tight_layout()
plt.savefig(fname)
plt.close()
'''
Plots only the unfolded distribution and expected after some normalisation factor f.
'''
def plotOnlyUnfolded(self, f = 1.0, normaliseByBinWidth = True, units = "fb", fname = "plotOnlyUnfolded.png"):
fig = plt.figure(figsize=(10, 10))
expectedCs = f*self.truth
observedCs = f*self.hunf
observedCs_mode = f*self.hunf_mode
if normaliseByBinWidth:
expectedCs = expectedCs.overBinWidth()
observedCs = observedCs.overBinWidth()
observedCs_mode = observedCs_mode.overBinWidth()
plt.errorbar(expectedCs.x, expectedCs.val, [expectedCs.err_dw**0.5, expectedCs.err_up**0.5], expectedCs.x_err, fmt = 'g^', linewidth=2, label = "Truth", markersize=10)
plt.errorbar(observedCs.x, observedCs.val, [observedCs.err_dw**0.5, observedCs.err_up**0.5], observedCs.x_err, fmt = 'rv', linewidth=2, label = "Marginal mean", markersize=5)
plt.errorbar(observedCs_mode.x, observedCs_mode.val, [observedCs_mode.err_dw**0.5, observedCs_mode.err_up**0.5], observedCs_mode.x_err, fmt = 'bv', linewidth=2, label = "Unfolded mode", markersize=5)
ymax = 0
for item in [expectedCs, observedCs, observedCs_mode]:
ma = np.amax(item.val)
ymax = np.amax([ymax, ma])
plt.legend()
if units != "":
plt.ylabel("Differential cross section ["+units+"]")
else:
plt.ylabel("Events")
plt.xlabel("Observable")
plt.ylim([0, ymax*1.2])
plt.tight_layout()
plt.savefig(fname)
plt.close()
|
daniloefl/Unfolder
|
Unfolder/Unfolder.py
|
Python
|
gpl-3.0
| 51,758
|
[
"Gaussian"
] |
e4879a9f6f2f3350a4c479e71c83719571838e6c1cbeec3de44b8fce91d7487b
|
from django.test import TestCase
from rest_framework.serializers import ValidationError
from push_notifications.api.rest_framework import APNSDeviceSerializer, GCMDeviceSerializer
GCM_DRF_INVALID_HEX_ERROR = {'device_id': [u"Device ID is not a valid hex number"]}
GCM_DRF_OUT_OF_RANGE_ERROR = {'device_id': [u"Device ID is out of range"]}
class APNSDeviceSerializerTestCase(TestCase):
def test_validation(self):
# valid data - 32 bytes upper case
serializer = APNSDeviceSerializer(data={
"registration_id": "AEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAEAE",
"name": "Apple iPhone 6+",
"device_id": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
})
self.assertTrue(serializer.is_valid())
# valid data - 32 bytes lower case
serializer = APNSDeviceSerializer(data={
"registration_id": "aeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeaeae",
"name": "Apple iPhone 6+",
"device_id": "ffffffffffffffffffffffffffffffff",
})
self.assertTrue(serializer.is_valid())
# valid data - 100 bytes upper case
serializer = APNSDeviceSerializer(data={
"registration_id": "AE" * 100,
"name": "Apple iPhone 6+",
"device_id": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
})
self.assertTrue(serializer.is_valid())
# valid data - 100 bytes lower case
serializer = APNSDeviceSerializer(data={
"registration_id": "ae" * 100,
"name": "Apple iPhone 6+",
"device_id": "ffffffffffffffffffffffffffffffff",
})
self.assertTrue(serializer.is_valid())
# invalid data - device_id, registration_id
serializer = APNSDeviceSerializer(data={
"registration_id": "invalid device token contains no hex",
"name": "Apple iPhone 6+",
"device_id": "ffffffffffffffffffffffffffffake",
})
self.assertFalse(serializer.is_valid())
class GCMDeviceSerializerTestCase(TestCase):
def test_device_id_validation_pass(self):
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Galaxy Note 3",
"device_id": "0x1031af3b",
})
self.assertTrue(serializer.is_valid())
def test_registration_id_unique(self):
"""Validate that a duplicate registration id raises a validation error."""
# add a device
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Galaxy Note 3",
"device_id": "0x1031af3b",
})
serializer.is_valid(raise_exception=True)
obj = serializer.save()
# ensure updating the same object works
serializer = GCMDeviceSerializer(obj, data={
"registration_id": "foobar",
"name": "Galaxy Note 5",
"device_id": "0x1031af3b",
})
serializer.is_valid(raise_exception=True)
obj = serializer.save()
# try to add a new device with the same token
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Galaxy Note 3",
"device_id": "0xdeadbeaf",
})
with self.assertRaises(ValidationError):
serializer.is_valid(raise_exception=True)
def test_device_id_validation_fail_bad_hex(self):
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Galaxy Note 3",
"device_id": "0x10r",
})
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, GCM_DRF_INVALID_HEX_ERROR)
def test_device_id_validation_fail_out_of_range(self):
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Galaxy Note 3",
"device_id": "10000000000000000", # 2**64
})
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, GCM_DRF_OUT_OF_RANGE_ERROR)
def test_device_id_validation_value_between_signed_unsigned_64b_int_maximums(self):
"""
2**63 < 0xe87a4e72d634997c < 2**64
"""
serializer = GCMDeviceSerializer(data={
"registration_id": "foobar",
"name": "Nexus 5",
"device_id": "e87a4e72d634997c",
})
self.assertTrue(serializer.is_valid())
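# Minimal usage sketch outside the test runner (assumes a configured
# Django settings module with push_notifications installed):
#   serializer = GCMDeviceSerializer(data={"registration_id": "foobar",
#                                          "name": "Galaxy Note 3",
#                                          "device_id": "0x1031af3b"})
#   if serializer.is_valid():
#       device = serializer.save()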
|
hylje/django-push-notifications
|
tests/test_rest_framework.py
|
Python
|
mit
| 3,845
|
[
"Galaxy"
] |
dc3679e19c765b1cd2e05e5a683697cc82ea856da27629a79b4758bf0f553d4a
|
from __future__ import print_function, division
from sympy.core import S, sympify, Dummy, Mod
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, pi
from sympy.core.relational import Eq
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, range
from sympy.core.cache import cacheit
from sympy.polys.polytools import poly_from_expr
from sympy.polys.polyerrors import PolificationFailed
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import combsimp
expr = combsimp(self)
if measure(expr) <= ratio*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
    The factorial is closely related to the gamma function; in
    fact, n! = gamma(n+1) for nonnegative integers. Rewrites of this
    kind are very useful for combinatorial simplification.
    Computation of the factorial is done using two algorithms. For
    small arguments a precomputed look-up table is used. For larger
    inputs the Prime-Swing algorithm is used; it is the fastest known
    algorithm and computes n! via the prime factorization of a special
    class of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy import gamma, polygamma
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
_small_factorials = []
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
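    # Illustration of the Prime-Swing recursion above (sketch):
    # n! = (n//2)!**2 * swing(n); e.g. for n = 10,
    # swing(10) = 10!/(5!)**2 = 252 and 14400 * 252 = 3628800 = 10!.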
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n is S.Zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n = n.p
if n < 20:
if not cls._small_factorials:
result = 1
for i in range(1, 20):
result *= i
cls._small_factorials.append(result)
result = cls._small_factorials[n-1]
else:
bits = bin(n).count('1')
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
r"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as::
,
| 1 for n = 0
!n = { 0 for n = 1
| (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
`
It can also be written as int(round(n!/exp(1))) but the recursive
definition with caching is implemented for this function.
An interesting analytic expression is the following [2]_
.. math:: !x = \Gamma(x + 1, -1)/e
which is valid for non-negative integers x. The above formula
    is not very useful in case of non-integers. :math:`\Gamma(x + 1, -1)` is
single-valued only for integral arguments x, elsewhere on the positive real
axis it has an infinite number of branches none of which are real.
References
==========
.. [1] http://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_uppergamma(self, arg):
from sympy import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
class factorial2(CombinatorialFunction):
"""The double factorial n!!, not to be confused with (n!)!
The double factorial is defined for nonnegative integers and for odd
negative integers as::
,
| n*(n - 2)*(n - 4)* ... * 1 for n positive odd
n!! = { n*(n - 2)*(n - 4)* ... * 2 for n positive even
| 1 for n = 0
| (n+2)!! / (n+2) for n negative odd
`
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if not arg.is_Integer:
raise ValueError("argument must be nonnegative integer or negative odd integer")
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2 ** k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_odd:
return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg)
raise ValueError("argument must be nonnegative integer or negative odd integer")
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
        # every odd negative input which is of the form -1-4k for a
# nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma, Piecewise, sqrt
return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)), (sqrt(2/pi), Eq(Mod(n, 2), 1)))
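    # Quick numeric check of the gamma rewrite above (sketch):
    #   >>> from sympy import factorial2, gamma, simplify
    #   >>> factorial2(7), simplify(factorial2(7).rewrite(gamma))
    #   (105, 105)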
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
"""Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
rf(x, k) = x * (x + 1) * ... * (x + k - 1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
    When x is a polynomial f of a single variable y of degree >= 1,
    rf(x, k) = f(y) * f(y+1) * ... * f(y+k-1) as described in
Peter Paule, "Greatest Factorial Factorization and Symbolic Summation",
Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995.
Examples
========
>>> from sympy import rf, symbols, factorial, ff, binomial
>>> from sympy.abc import x
>>> n, k = symbols('n k', integer=True)
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
>>> rf(x**3, 2)
Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ')
Rewrite
>>> rf(x, k).rewrite(ff)
FallingFactorial(k + x - 1, k)
>>> rf(x, k).rewrite(binomial)
binomial(k + x - 1, k)*factorial(k)
>>> rf(n, k).rewrite(factorial)
factorial(k + n - 1)/factorial(n - 1)
See Also
========
factorial, factorial2, FallingFactorial
References
==========
.. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
else:
v = opt.gens[0]
return reduce(lambda r, i:
r*(F.subs(v, v + i).expand()),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
else:
v = opt.gens[0]
return 1/reduce(lambda r, i:
r*(F.subs(v, v - i).expand()),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return gamma(x + k) / gamma(x)
def _eval_rewrite_as_FallingFactorial(self, x, k):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k):
if x.is_integer and k.is_integer:
return factorial(k + x - 1) / factorial(x - 1)
def _eval_rewrite_as_binomial(self, x, k):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
class FallingFactorial(CombinatorialFunction):
"""Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
ff(x, k) = x * (x-1) * ... * (x - k+1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
    When x is a polynomial f of a single variable y of degree >= 1,
    ff(x, k) = f(y) * f(y-1) * ... * f(y-k+1) as described in
Peter Paule, "Greatest Factorial Factorization and Symbolic Summation",
Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995.
>>> from sympy import ff, factorial, rf, gamma, polygamma, binomial, symbols
>>> from sympy.abc import x, k
>>> n, m = symbols('n m', integer=True)
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
>>> ff(x**2, 2)
Poly(x**4 - 2*x**3 + x**2, x, domain='ZZ')
>>> ff(n, n)
factorial(n)
Rewrite
>>> ff(x, k).rewrite(gamma)
(-1)**k*gamma(k - x)/gamma(-x)
>>> ff(x, k).rewrite(rf)
RisingFactorial(-k + x + 1, k)
>>> ff(x, m).rewrite(binomial)
binomial(x, m)*factorial(m)
>>> ff(n, m).rewrite(factorial)
factorial(n)/factorial(-m + n)
See Also
========
factorial, factorial2, RisingFactorial
References
==========
.. [1] http://mathworld.wolfram.com/FallingFactorial.html
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif k.is_integer and x == k:
return factorial(x)
elif k.is_Integer:
if k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
else:
v = opt.gens[0]
return reduce(lambda r, i:
r*(F.subs(v, v - i).expand()),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
else:
v = opt.gens[0]
return 1/reduce(lambda r, i:
r*(F.subs(v, v + i).expand()),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return (-1)**k*gamma(k - x) / gamma(-x)
def _eval_rewrite_as_RisingFactorial(self, x, k):
return rf(x - k + 1, k)
def _eval_rewrite_as_binomial(self, x, k):
if k.is_integer:
return factorial(k) * binomial(x, k)
def _eval_rewrite_as_factorial(self, x, k):
if x.is_integer and k.is_integer:
return factorial(x) / factorial(x - k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.falling_factorial(self.args[0]._sage_(),
self.args[1]._sage_())
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
First, in a strict combinatorial sense it defines the
number of ways we can choose 'k' elements from a set of
'n' elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
    The other definition is a generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.
    For the sake of convenience, for negative 'k' this function
    will return zero no matter the value of the other argument.
To expand the binomial when n is a symbol, use either
expand_func() or expand(func=True). The former will keep the
polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([ binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [ binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
"""
def fdiff(self, argindex=1):
from sympy import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
M, result = int(_sqrt(n)), 1
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
result *= prime
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
result *= prime
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp = a + exp
if exp > 0:
result *= prime**exp
return Integer(result)
else:
d = result = n - k + 1
for i in range(2, k + 1):
d += 1
result *= d
result /= i
return result
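        # The prime-power loop above follows Kummer's theorem (sketch):
        # the exponent of a prime p in C(n, k) equals the number of carries
        # when adding k and n - k in base p.  E.g. C(10, 4) = 210 = 2*3*5*7:
        # adding 4 (100) and 6 (110) in base 2 produces one carry, so
        # 2 divides C(10, 4) exactly once.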
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
if d.is_zero or k.is_zero:
return S.One
elif d.is_zero is False:
if (k - 1).is_zero:
return n
elif k.is_negative:
return S.Zero
elif n.is_integer and n.is_nonnegative and d.is_negative:
return S.Zero
if k.is_Integer and k > 0 and n.is_Number:
return cls._eval(n, k)
def _eval_expand_func(self, **hints):
"""
        Function to expand binomial(n, k) when k is a positive integer.
        Here, n is self.args[0] and k is self.args[1] of binomial(n, k).
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k == S.Zero:
return S.One
elif k < 0:
return S.Zero
else:
n = self.args[0]
result = n - k + 1
for i in range(2, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k):
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_rewrite_as_tractable(self, n, k):
return self._eval_rewrite_as_gamma(n, k).rewrite('tractable')
def _eval_rewrite_as_FallingFactorial(self, n, k):
if k.is_integer:
return ff(n, k) / factorial(k)
def _eval_is_integer(self):
n, k = self.args
if n.is_integer and k.is_integer:
return True
elif k.is_integer is False:
return False
|
ChristinaZografou/sympy
|
sympy/functions/combinatorial/factorials.py
|
Python
|
bsd-3-clause
| 27,826
|
[
"VisIt"
] |
dac60b72fd765d39153d4ba10bfd94224133d27f9b5ac421061fdb7e27d0f371
|
import os
import uuid
import tempfile
import tornado.process
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Security import Locations, X509Chain
BASECS = "/WebApp"
def getCSValue( opt, defValue = None ):
return gConfig.getValue( "%s/%s" % ( BASECS, opt ), defValue )
def getTitle():
defVal = gConfig.getValue( "/DIRAC/Configuration/Name", gConfig.getValue( "/DIRAC/Setup" ) )
return "%s - DIRAC" % gConfig.getValue( "%s/Title" % BASECS, defVal )
def devMode():
return getCSValue( "DevelopMode", True )
def rootURL():
return getCSValue( "RootURL", "/DIRAC" )
def balancer():
b = getCSValue( "Balancer", "" ).lower()
if b in ( "", "none" ):
return ""
return b
def numProcesses():
return getCSValue( "NumProcesses", 1 )
def HTTPS():
if balancer():
return False
return getCSValue( "HTTPS/Enabled", True )
def HTTPPort():
if balancer():
default = 8000
else:
default = 8080
procAdd = tornado.process.task_id() or 0
return getCSValue( "HTTP/Port", default ) + procAdd
def HTTPSPort():
return getCSValue( "HTTPS/Port", 8443 )
def HTTPSCert():
cert = Locations.getHostCertificateAndKeyLocation()
if cert:
cert = cert[0]
else:
cert = "/opt/dirac/etc/grid-security/hostcert.pem"
return getCSValue( "HTTPS/Cert", cert )
def HTTPSKey():
key = Locations.getHostCertificateAndKeyLocation()
if key:
key = key[1]
else:
key = "/opt/dirac/etc/grid-security/hostkey.pem"
return getCSValue( "HTTPS/Key", key )
def setup():
return gConfig.getValue( "/DIRAC/Setup" )
def cookieSecret():
# TODO: Store the secret somewhere
return gConfig.getValue( "CookieSecret", uuid.getnode() )
def generateCAFile():
"""
Generate a single CA file with all the PEMs
"""
caDir = Locations.getCAsLocation()
for fn in ( os.path.join( os.path.dirname( caDir ), "cas.pem" ),
os.path.join( os.path.dirname( HTTPSCert() ), "cas.pem" ),
False ):
if not fn:
fn = tempfile.mkstemp( prefix = "cas.", suffix = ".pem" )[1]
try:
fd = open( fn, "w" )
except IOError:
continue
for caFile in os.listdir( caDir ):
caFile = os.path.join( caDir, caFile )
result = X509Chain.X509Chain.instanceFromFile( caFile )
if not result[ 'OK' ]:
continue
chain = result[ 'Value' ]
expired = chain.hasExpired()
if not expired[ 'OK' ] or expired[ 'Value' ]:
continue
fd.write( chain.dumpAllToString()[ 'Value' ] )
fd.close()
return fn
return False
def getAuthSectionForHandler( route ):
return "%s/Access/%s" % ( BASECS, route )
def getTheme():
return getCSValue( "Theme", "desktop" )
def getIcon():
return getCSValue("Icon","/static/core/img/icons/system/favicon.ico")
def SSLProtocol():
  return getCSValue( "SSLProtocol", "" )
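# --- Illustrative sketch (added for exposition; not part of the original file). ---
# generateCAFile() above walks a list of candidate paths and falls back to a
# temporary file when none is writable. The same pattern in isolation, with a
# hypothetical candidate path:
def _demoCandidateFallback():
  import tempfile
  for fn in ( "/opt/dirac/etc/cas.pem", False ):
    if not fn:
      fn = tempfile.mkstemp( prefix = "cas.", suffix = ".pem" )[1]
    try:
      fd = open( fn, "w" )
    except IOError:
      continue  # unwritable location: try the next candidate
    fd.write( "# bundled CA certificates would go here\n" )
    fd.close()
    return fn
  return False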
|
chaen/WebAppDIRAC
|
Lib/Conf.py
|
Python
|
gpl-3.0
| 2,817
|
[
"DIRAC"
] |
7d6ae31e21c8d9602f62a11177a10385e3efe6afe7b3af48dba8ab0369446cb4
|
#!/home/OSUMC.EDU/blac96/source/venv/test_mucor_pip/bin/python
# -*- coding: utf8
# Copyright 2013-2015 James S Blachly, MD and The Ohio State University
#
# This file is part of Mucor.
#
# Mucor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mucor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mucor. If not, see <http://www.gnu.org/licenses/>.
# let print() be a function rather than statement
# ala python3
from __future__ import print_function
# python standard modules
import os
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import time
import argparse
import csv
import itertools
from collections import defaultdict
import gzip
import cPickle as pickle
import json
# nonstandard modules
import numpy as np
import pandas as pd
from pandas import ExcelWriter
import HTSeq
import pysam
# optional modules
# mucor modules
from config import Config
from mucor_config import parseAndValidateRegions
def abortWithMessage(message):
print("*** FATAL ERROR: " + message + " ***")
exit(2)
def throwWarning(message, help = False):
print("*** WARNING: " + message + " ***")
return
def parseJSON(json_config):
'''
Import the JSON config file from mucor_config.py.
Reads the config file into a dictionary, then writes each dictionary entry into the respective Config class position.
'''
config = Config()
try:
JD = json.load(open(json_config,'r'))
except ValueError as json_error:
throwWarning(json_error.message)
abortWithMessage("Could not load the given JSON config file. See the example config for proper formatting.")
# write dictionary values into a more human-friendly config class
config.outputDir = os.path.expanduser(JD['outputDir'])
# no longer take region definition from json config file
# command line argument now
if str(JD['regions']):
config.regions = JD['regions']
else:
config.regions = []
config.inputFiles = []
config.samples = []
config.bams = {}
for i in JD['samples']:
# make a list of bam file paths defined by the JSON config
# i.e. extract the path of every file, if the file type is bam
bams = [x['path'] for x in i['files'] if x['type'] == "bam"]
if len(bams) >= 1:
for bam in bams:
config.inputFiles.append(bam)
config.samples.append(i['id'])
try:
config.bams[i['id']].append(bam)
except KeyError:
config.bams[i['id']] = []
config.bams[i['id']].append(bam)
else:
throwWarning("Sample {0} has no BAM file!".format(i['id']))
if not config.bams:
abortWithMessage("No bam files defined!")
return config
def parseRegionBed(regionfile, regionDictIn):
'''
Read through the supplied bed file and add the rows to the input region dictionary before returning it. Appends to the input dict, rather than overwriting it.
'''
regionDict = regionDictIn
for line in open(str(regionfile),'r'):
col = line.strip().split("\t")
chrom = col[0]
start = col[1]
end = col[2]
if len(col) > 3:
name = col[3]
else:
name = str(chrom) + ":" + str(start) + "-" + str(end)
regionDict[chrom].add((start,end,name))
return regionDict
def inRegionDict(chrom, start, end, regionDict):
'''Checks the given mutation location to see if it is in the dictionary of regions'''
if regionDict[chrom]: # are there any regions of interest in the same chromosome as this mutation?
for locs in regionDict[chrom]: # breaking the list of regions according to chromosome should significantly decrease the number of comparisons necessary
if locs[0] == 0 and locs[1] == 0: # chrN:0-0 is used to define an entire chromosome as a region of interest.
return True
elif int(start) >= int(locs[0]) and int(end) <= int(locs[1]):
return True
return False
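# --- Illustrative sketch (added for exposition; not part of the original file). ---
# Minimal use of the region dictionary logic above, built by hand rather than
# from a bed file.
def _demo_region_lookup():
    regionDict = defaultdict(set)
    regionDict['chr1'].add((100, 200, 'chr1:100-200'))
    assert inRegionDict('chr1', 150, 160, regionDict)      # inside the window
    assert not inRegionDict('chr1', 50, 60, regionDict)    # before the window
    assert not inRegionDict('chr2', 150, 160, regionDict)  # different chromosome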
def GaugeDepth(config):
'''
assess the depth of each sample at the given regions
'''
startTime = time.clock()
regionDict = defaultdict(set)
for item in config.regions:
if str(str(item).split('.')[-1]).lower() == 'bed': # this item is a bed file
regionDict = parseRegionBed(item, regionDict)
else: # this was defined as a string
reg_chr = item[0]
reg_str = item[1]
reg_end = item[2]
if reg_str >= 0 and reg_end > reg_str:
name = str(reg_chr) + ":" + str(reg_str) + "-" + str(reg_end)
elif reg_str >= 0 and reg_str == reg_end:
name = str(reg_chr) + ":" + str(reg_str)
elif not reg_str and not reg_end:
name = str(reg_chr)
regionDict[reg_chr].add((reg_str, reg_end, name))
if not regionDict:
abortWithMessage("Regions not set!")
covD = {'chr':[], 'start':[], 'stop':[], 'name':[], 'sample':[],'depth':[]}
print("\n=== Reading BAM Files ===")
for sid, fns in config.bams.items():
# loop over all samples
for fn in fns:
# loop over all bam files for this sample
try:
samfile = pysam.AlignmentFile(fn, "rb" )
except ValueError:
throwWarning("Cannot open file {0}".format(fn))
continue
contig_length_dict = dict(zip(samfile.references, samfile.lengths)) # save contig lengths for later
for contig, ROI in regionDict.items():
for window in ROI:
bed_name = window[2]
# make window 0-based
try:
window = [int(window[0]) - 1, int(window[1])]
except TypeError:
# start and/or stop were not defined - pull them from contig dictionary
if not window[0]:
start = 0
else:
start = int(window[0] - 1)
if not window[1]:
end = contig_length_dict[contig]
else:
end = int(window[1])
window = [start, end]
# loop over all ROIs, checking this bam
if config.p:
#point method
tmp_dict = {}
position = round((window[1] - window[0])/2.0) + window[0]
avg_covg = samfile.count(contig, position - 1, position)
#for position in range(window[0],window[1]):
# region = str(contig) + ':' + str(position) + '-' + str(position)
# tmp_dict[position] = samfile.count(region=region)
#avg_covg = np.mean(tmp_dict.values())
elif config.c:
#read count method
avg_covg = samfile.count(contig, window[0], window[1])
#note that "avg_covg" is only a name here - it is the total count of reads, not an average!
else:
#complete average method
#'''
tmp_dict = {}
for position in range(window[0],window[1]):
tmp_dict[position] = 0
for pileupcolumn in samfile.pileup(contig, window[0], window[1],stepper='all'):
#loop over reads that hit the window locations and record coverage
# 'stepper = all' yields mapped, primary, non-duplicate (identified by sam flag), QC pass reads
try:
tmp_dict[pileupcolumn.pos]
tmp_dict[pileupcolumn.pos] = pileupcolumn.n
except KeyError:
#skip this position if it's not in the region dict
continue
avg_covg = np.mean(tmp_dict.values())
#'''
'''
#this behaves erratically and does not produce the same number if run repeatedly
# not sure how to use the function, but it could be faster than pileup
counter = 0
for ct_cov in samfile.count_coverage(contig, window[0], window[1], read_callback = 'all'):
for nt_arr in ct_cov:
counter += int(nt_arr)
stop()
avg_covg = counter/float(window[1] - window[0])
'''
covD['chr'].append(str(contig))
covD['start'].append(int(window[0]) + 1)
covD['stop'].append(int(window[1]))
covD['name'].append(str(bed_name))
covD['sample'].append(str(sid))
covD['depth'].append(float(avg_covg))
samfile.close()
totalTime = time.clock() - startTime
print("{0:02d}:{1:02d}\t{2}".format(int(totalTime/60), int(totalTime % 60), fn))
covDF = pd.DataFrame.from_dict(covD)[['chr','start','stop','name','sample','depth']]
covDF = covDF.groupby(['chr','start','stop','name','sample'])['depth'].apply(sum).reset_index()
totalTime = time.clock() - startTime
print("\n{0:02d}:{1:02d}\t{2}".format(int(totalTime/60), int(totalTime % 60), "Done"))
return covDF
def SamplesToColumns(grp):
'''helper function to rearrange the coverage dataframe'''
columns = ['chr','start','stop','name']
values = [grp['chr'].values[0], grp['start'].values[0], grp['stop'].values[0],grp['name'].values[0]]
grp_dict = {}
for i in grp[['sample','depth']].T.itertuples():
grp_dict[i[0]] = list(i[1:])
columns += grp_dict['sample']
values += grp_dict['depth']
    return pd.DataFrame(dict(zip(columns, values)), index=[0])[columns]
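# --- Illustrative sketch (added for exposition; not part of the original file). ---
# The long-to-wide reshaping done by SamplesToColumns can also be expressed
# with pandas' pivot_table; a toy equivalent under that assumption:
def _demo_pivot():
    df = pd.DataFrame({'chr': ['chr1', 'chr1'], 'start': [100, 100],
                       'stop': [200, 200], 'name': ['roi', 'roi'],
                       'sample': ['s1', 's2'], 'depth': [30.0, 45.0]})
    wide = df.pivot_table(index=['chr', 'start', 'stop', 'name'],
                          columns='sample', values='depth').reset_index()
    return wide  # one row per region, one depth column per sample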
def printOutput(config, covDF):
'''reformat the coverage dataframe and write it to xlsx file'''
startTime = time.clock()
print("\n=== Writing output files to {0}/ ===".format(config.outputDir))
outDF = covDF.groupby(['chr','start','stop','name']).apply(SamplesToColumns).reset_index(drop=True)
    # pandas can actually start up an excel writer object using a non-existent or unwritable file path; the error only comes when saving to such a path (tested in pandas version 0.16.2)
outXLSX = pd.ExcelWriter(str(config.outputDir) + "/" + config.outfile, engine='xlsxwriter')
outDF.to_excel(outXLSX, 'Depth Gauge', index=False)
try:
outXLSX.save()
except IOError:
abortWithMessage("Output directory is not writable or doesn't exist: {0}".format(config.outputDir))
totalTime = time.clock() - startTime
print("\t{0}: {1} rows".format(str(config.outputDir) + "/" + config.outfile , len(outDF)))
return True
def main():
print("\n=== Run Info ===")
print("\t{0}".format(time.ctime() ) )
print()
parser = argparse.ArgumentParser()
parser.add_argument("json", help="Pass 1 json config as an argument")
parser.add_argument("-p", "--point", dest='p', action="store_true", help="point-location coverage approximation on middle of region windows")
parser.add_argument("-c", "--count", dest='c', action="store_true", help="report total number of reads in each region window (not an average)")
parser.add_argument("-r", "--regions", default="", help="Comma separated list of bed regions and/or bed files by which to limit output. Ex: chr1:10230-10240,chr2,my_regions.bed")
args = parser.parse_args()
config = parseJSON(args.json)
config.p = bool(args.p)
config.c = bool(args.c)
if config.p and config.c:
abortWithMessage("Cannot declare -p and -c options together!")
if config.p:
config.outfile = 'Depth_of_Coverage-p.xlsx'
elif config.c:
config.outfile = 'Depth_of_Coverage-c.xlsx'
else:
config.outfile = 'Depth_of_Coverage.xlsx'
if os.path.exists(config.outputDir) and config.outfile in [str(x) for x in os.listdir(config.outputDir)]:
abortWithMessage("The directory {0} already exists and contains depth gauge output. Will not overwrite.".format(config.outputDir))
elif not os.path.exists(config.outputDir):
try:
os.makedirs(config.outputDir)
except OSError:
abortWithMessage("Error when creating output directory {0}".format(config.outputDir))
# check that all specified variant files exist
for fn in config.inputFiles:
if not os.path.exists(fn):
abortWithMessage("Could not find variant file: {0}".format(fn))
config.regions = parseAndValidateRegions(args.regions, config)
covDF = GaugeDepth(config)
printOutput(config, covDF)
# pretty print newline before exit
print()
if __name__ == "__main__":
if sys.hexversion < 0x02070000:
raise RuntimeWarning("mucor should be run on python 2.7.0 or greater.")
main()
|
blachlylab/mucor
|
build/scripts-2.7/depth_gauge.py
|
Python
|
gpl-3.0
| 13,906
|
[
"HTSeq",
"pysam"
] |
9e177e692979303a567f123403e89d9bb626b1f689eba9c3f433856b4deda818
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.distributions.python.ops import kullback_leibler # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Normal(distribution.ContinuousDistribution):
"""The scalar Normal distribution with mean and stddev parameters mu, sigma.
#### Mathematical details
The PDF of this distribution is:
```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Normal distribution.
dist = tf.contrib.distributions.Normal(mu=0, sigma=3)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tf.contrib.distributions.Normal(mu=[1, 2.], sigma=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.pdf([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tf.contrib.distributions.Normal(mu=1, sigma=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.pdf(3.0)
```
"""
def __init__(self, mu, sigma, name="Normal"):
"""Construct Normal distributions with mean and stddev `mu` and `sigma`.
The parameters `mu` and `sigma` must be shaped in a way that supports
broadcasting (e.g. `mu + sigma` is a valid operation).
Args:
mu: `float` or `double` tensor, the means of the distribution(s).
sigma: `float` or `double` tensor, the stddevs of the distribution(s).
sigma must contain only positive values.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if mu and sigma are different dtypes.
"""
with ops.op_scope([mu, sigma], name):
mu = ops.convert_to_tensor(mu)
sigma = ops.convert_to_tensor(sigma)
with ops.control_dependencies([check_ops.assert_positive(sigma)]):
self._name = name
self._mu = array_ops.identity(mu, name="mu")
self._sigma = array_ops.identity(sigma, name="sigma")
self._batch_shape = self._ones().get_shape()
self._event_shape = tensor_shape.TensorShape([])
contrib_tensor_util.assert_same_float_dtype((mu, sigma))
@property
def name(self):
return self._name
@property
def dtype(self):
return self._mu.dtype
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op.
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return array_ops.shape(self._ones())
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch shape
"""
return self._batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op.
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event shape
"""
return self._event_shape
@property
def mu(self):
"""Distribution parameter for the mean."""
return self._mu
@property
def sigma(self):
"""Distribution parameter for standard deviation."""
return self._sigma
def mean(self, name="mean"):
"""Mean of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._sigma, self._mu], name):
return self._mu * array_ops.ones_like(self._sigma)
def mode(self, name="mode"):
"""Mode of this distribution."""
return self.mean(name="mode")
def std(self, name="std"):
"""Standard deviation of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._sigma, self._mu], name):
return self._sigma * array_ops.ones_like(self._mu)
def variance(self, name="variance"):
"""Variance of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return math_ops.square(self.std())
def log_pdf(self, x, name="log_pdf"):
"""Log pdf of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
return (-0.5*log_2_pi - math_ops.log(self._sigma)
-0.5*math_ops.square((x - self._mu) / self._sigma))
def cdf(self, x, name="cdf"):
"""CDF of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
# TODO(ebrevdo): wrap this in a Defun with a custom Defun
# gradient because the analytic gradient may be faster than
# automatic differentiation.
return (0.5 + 0.5*math_ops.erf(
1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))
def log_cdf(self, x, name="log_cdf"):
"""Log CDF of observations `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
return math_ops.log(self.cdf(x))
def pdf(self, x, name="pdf"):
"""The PDF of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`.
"""
return super(Normal, self).pdf(x, name=name)
def entropy(self, name="entropy"):
"""The entropy of Normal distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma], name):
two_pi_e1 = constant_op.constant(
2 * math.pi * math.exp(1), dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
sigma = self._sigma * array_ops.ones_like(self._mu)
return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))
def sample(self, n, seed=None, name="sample"):
"""Sample `n` observations from the Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, n], name):
broadcast_shape = (self._mu + self._sigma).get_shape()
n = ops.convert_to_tensor(n)
shape = array_ops.concat(
0, [array_ops.pack([n]), array_ops.shape(self.mean())])
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
sampled.set_shape(final_shape)
return sampled * self._sigma + self._mu
@property
def is_reparameterized(self):
return True
def _ones(self):
return array_ops.ones_like(self._mu + self._sigma)
def _zeros(self):
return array_ops.zeros_like(self._mu + self._sigma)
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.op_scope([n_a.mu, n_b.mu], name, "kl_normal_normal"):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.sigma)
s_b_squared = math_ops.square(n_b.sigma)
ratio = s_a_squared / s_b_squared
return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared)
+ half * (ratio - one - math_ops.log(ratio)))
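# --- Illustrative sketch (added for exposition; not part of the original file). ---
# The closed form used in _kl_normal_normal, checked with plain NumPy for a
# pair of scalar Normals:
#   KL(a || b) = (mu_a - mu_b)^2 / (2 * sigma_b^2)
#                + 1/2 * (sigma_a^2/sigma_b^2 - 1 - log(sigma_a^2/sigma_b^2))
def _demo_kl_normal_normal():
  import numpy as np
  mu_a, sigma_a, mu_b, sigma_b = 0.0, 1.0, 1.0, 2.0
  ratio = sigma_a**2 / sigma_b**2
  kl = (mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (ratio - 1 - np.log(ratio))
  assert kl >= 0  # KL divergence is non-negative
  return kl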
|
dhalleine/tensorflow
|
tensorflow/contrib/distributions/python/ops/normal.py
|
Python
|
apache-2.0
| 11,692
|
[
"Gaussian"
] |
886120d94f30c90a7c08ac6775f80973034b0cbc688fa271b05a827ad7d81f98
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
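# --- Illustrative sketch (added for exposition; not part of the original file). ---
# Sanity check of sample_gaussian for a full covariance: with many draws the
# empirical covariance should approach the requested matrix.
def _demo_sample_gaussian():
    rng = np.random.RandomState(0)
    mean = np.array([0.0, 1.0])
    covar = np.array([[2.0, 0.3], [0.3, 0.5]])
    X = sample_gaussian(mean, covar, covariance_type='full',
                        n_samples=50000, random_state=rng)
    # X has shape (n_features, n_samples), matching np.cov's default layout
    assert np.allclose(np.cov(X), covar, atol=0.05)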
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform; the best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_components`, `n_features`) if 'diag',
            (`n_components`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X):
"""Estimate model parameters with the expectation-maximization
algorithm.
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
# initialization step
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is deprecated
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
                # (should compare to self.tol when deprecated 'thresh' is
# removed)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
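# --- Illustrative sketch (added for exposition; not part of the original file). ---
# For a single diagonal component, the vectorized expression above should match
# a direct evaluation of the Gaussian log-density (assuming scipy.stats is
# available, which it is wherever this module imports):
def _demo_diag_density():
    from scipy.stats import multivariate_normal
    X = np.array([[0.0, 1.0], [2.0, -1.0]])
    means = np.array([[0.0, 0.0]])
    covars = np.array([[1.0, 2.0]])  # per-feature variances of one component
    ref = multivariate_normal(means[0], np.diag(covars[0])).logpdf(X)
    lpr = _log_multivariate_normal_density_diag(X, means, covars)
    assert np.allclose(lpr[:, 0], ref)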
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
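# --- Illustrative sketch (added for exposition; not part of the original file). ---
# Typical model selection with the GMM class above: fit several component
# counts and keep the one with the lowest BIC.
def _demo_select_by_bic():
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 1), 10 + rng.randn(100, 1)))
    models = [GMM(n_components=n, random_state=0).fit(X) for n in (1, 2, 3)]
    return min(models, key=lambda g: g.bic(X))  # expect the 2-component fit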
|
loli/semisupervisedforests
|
sklearn/mixture/gmm.py
|
Python
|
bsd-3-clause
| 27,512
|
[
"Gaussian"
] |
8b9d808b1af8d8baa4a5963eb6c691e0c0c6766295ae4634d0b4965c190a75d6
|
# -*- coding: utf-8 -*-
"""test_table_streaming_support.py:
Test the streaming support in moose.Table.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import os
import sys
import moose
import numpy as np
print( '[INFO] Using moose from %s' % moose.__file__ )
def print_table( table ):
msg = ""
msg += " datafile : %s" % table.datafile
msg += " useStreamer: %s" % table.useStreamer
msg += ' Path: %s' % table.path
print( msg )
def test_small( ):
moose.CubeMesh( '/compt' )
r = moose.Reac( '/compt/r' )
a = moose.Pool( '/compt/a' )
a.concInit = 1
b = moose.Pool( '/compt/b' )
b.concInit = 2
c = moose.Pool( '/compt/c' )
c.concInit = 0.5
moose.connect( r, 'sub', a, 'reac' )
moose.connect( r, 'prd', b, 'reac' )
moose.connect( r, 'prd', c, 'reac' )
r.Kf = 0.1
r.Kb = 0.01
tabA = moose.Table2( '/compt/a/tabA' )
# tabA.format = 'npy'
tabA.useStreamer = True # Setting format alone is not good enough
# Setting datafile enables streamer.
tabB = moose.Table2( '/compt/b/tabB' )
tabB.datafile = 'table2.npy'
tabC = moose.Table2( '/compt/c/tabC' )
tabC.datafile = 'tablec.csv'
moose.connect( tabA, 'requestOut', a, 'getConc' )
moose.connect( tabB, 'requestOut', b, 'getConc' )
moose.connect( tabC, 'requestOut', c, 'getConc' )
moose.reinit( )
[ print_table( x) for x in [tabA, tabB, tabC] ]
runtime = 1000
print( 'Starting moose for %d secs' % runtime )
moose.start( runtime, 1 )
# Now read the numpy and csv and check the results.
a = np.loadtxt( tabA.datafile, skiprows=1 )
b = np.load( 'table2.npy' )
c = np.loadtxt( 'tablec.csv', skiprows=1 )
assert (len(a) == len(b) == len(c))
print( ' MOOSE is done' )
def buildLargeSystem(useStreamer = False):
# create a huge system.
if moose.exists('/comptB'):
moose.delete('/comptB')
moose.CubeMesh( '/comptB' )
tables = []
for i in range(300):
r = moose.Reac('/comptB/r%d'%i)
a = moose.Pool('/comptB/a%d'%i)
a.concInit = 10.0
b = moose.Pool('/comptB/b%d'%i)
b.concInit = 2.0
c = moose.Pool('/comptB/c%d'%i)
c.concInit = 0.5
moose.connect( r, 'sub', a, 'reac' )
moose.connect( r, 'prd', b, 'reac' )
moose.connect( r, 'prd', c, 'reac' )
r.Kf = 0.1
r.Kb = 0.01
        # Make table names large enough that the header exceeds 2^16 bytes.
        # Numpy format version 1 can't handle such a large header; if format 1
        # is used, this test will fail.
t = moose.Table2('/comptB/TableO1%d'%i + 'abc'*100)
moose.connect(t, 'requestOut', a, 'getConc')
tables.append(t)
if useStreamer:
s = moose.Streamer('/comptB/streamer')
s.datafile = 'data2.npy'
print("[INFO ] Total tables %d" % len(tables))
# Add tables using wilcardFind.
s.addTables(moose.wildcardFind('/comptB/##[TYPE=Table2]'))
print("Streamer has %d table" % s.numTables)
assert s.numTables == len(tables), (s.numTables, len(tables))
moose.reinit()
moose.start(10)
if useStreamer:
# load the data
data = np.load(s.datafile)
header = str(data.dtype.names)
assert len(header) > 2**16
else:
data = { x.columnName : x.vector for x in tables }
return data
def test_large_system():
# Get data without streamer and with streamer.
# These two must be the same.
X = buildLargeSystem(False) # without streamer
Y = buildLargeSystem(True) # with streamer.
# X has no time.
assert len(X) == len(Y.dtype.names)-1, (len(X), Y.dtype)
# same column names.
xNames = list(X.keys())
yNames = list(Y.dtype.names)
assert set(yNames) - set(xNames) == set(['time']), (yNames, xNames)
# Test for equality in some tables.
for i in range(1, 10):
a, b = Y[xNames[i]], X[xNames[i]]
assert a.shape == b.shape, (a.shape, b.shape)
assert (a == b).all(), (a-b)
def main( ):
test_small( )
test_large_system()
print( '[INFO] All tests passed' )
if __name__ == '__main__':
main()
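# --- Illustrative sketch (added for exposition; not part of the original file). ---
# The streamer writes a NumPy structured array; columns are addressed by field
# name, which is what the comparisons in test_large_system rely on. Reading it
# back (file name as used above):
def _demo_read_streamer_output():
    data = np.load('data2.npy')       # structured array written by the streamer
    names = list(data.dtype.names)    # 'time' plus one field per table
    return {n: data[n] for n in names if n != 'time'}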
|
dilawar/moose-core
|
tests/core/test_table_streaming_support.py
|
Python
|
gpl-3.0
| 4,468
|
[
"MOOSE"
] |
28ec4482872c80cd210dd49efac528502533806d09e32a24c83db8a0f6a0b27d
|
from bs_utils.utils import *
def wg_build(fasta_file, build_command, ref_path, aligner):
# ref_path is a string that contains the directory where the reference genomes are stored with
# the input Fasta filename appended
ref_path = os.path.join(ref_path,
os.path.split(fasta_file)[1] + '_'+aligner)
clear_dir(ref_path)
#---------------------------------------------------------------
# 1. First get the complementary genome (also do the reverse)
# 2. Then do CT and GA conversions
#---------------------------------------------------------------
open_log(os.path.join(ref_path, 'log'))
refd = {}
w_c2t = open(os.path.join(ref_path, 'W_C2T.fa'),'w')
c_c2t = open(os.path.join(ref_path, 'C_C2T.fa'),'w')
w_g2a = open(os.path.join(ref_path, 'W_G2A.fa'),'w')
c_g2a = open(os.path.join(ref_path, 'C_G2A.fa'),'w')
for chrom_id, chrom_seq in read_fasta(fasta_file):
serialize(chrom_seq, os.path.join(ref_path, chrom_id))
refd[chrom_id] = len(chrom_seq)
w_c2t.write('>%s\n%s\n' % (chrom_id, chrom_seq.replace("C","T")))
w_g2a.write('>%s\n%s\n' % (chrom_id, chrom_seq.replace("G","A")))
chrom_seq = reverse_compl_seq(chrom_seq)
c_c2t.write('>%s\n%s\n' % (chrom_id, chrom_seq.replace("C","T")))
c_g2a.write('>%s\n%s\n' % (chrom_id, chrom_seq.replace("G","A")))
elapsed('Preprocessing '+chrom_id)
for outf in [w_c2t, c_c2t, w_g2a, c_g2a]:
outf.close()
serialize(refd, os.path.join(ref_path,"refname"))
elapsed('Genome preprocessing')
# append ref_path to all elements of to_bowtie
to_bowtie = map(lambda f: os.path.join(ref_path, f), ['W_C2T', 'W_G2A', 'C_C2T', 'C_G2A'])
# start bowtie-build for all converted genomes and wait for the processes to finish
run_in_parallel([(build_command % { 'fname' : fname }, fname+'.log') for fname in to_bowtie])
# delete fasta files of converted genomes
if aligner != "rmap" :
delete_files(f+'.fa' for f in to_bowtie)
elapsed('Done')
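# --- Illustrative sketch (added for exposition; not part of the original file). ---
# The four converted genomes built above, shown on a toy sequence with plain
# Python (no bs_utils helpers needed):
def _demo_conversions():
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    seq = 'ATCGGC'
    rc = ''.join(complement[b] for b in reversed(seq))  # reverse complement
    return {'W_C2T': seq.replace('C', 'T'),   # Watson strand, C -> T
            'W_G2A': seq.replace('G', 'A'),   # Watson strand, G -> A
            'C_C2T': rc.replace('C', 'T'),    # Crick strand, C -> T
            'C_G2A': rc.replace('G', 'A')}    # Crick strand, G -> A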
|
BioInfoTools/BSVF
|
bin/BSseeker2/bs_index/wg_build.py
|
Python
|
lgpl-3.0
| 2,085
|
[
"Bowtie"
] |
3329c6f5dea11365c9314ae6b08701b72a4019a5c520880bcb8a780156a7a489
|
"""
Generalized Linear Models with Exponential Dispersion Family
"""
# Author: Christian Lorentzen <lorentzen.ch@googlemail.com>
# some parts and tricks stolen from other sklearn files.
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.optimize
from ...base import BaseEstimator, RegressorMixin
from ...utils import check_array, check_X_y
from ...utils.optimize import _check_optimize_result
from ...utils.validation import check_is_fitted, _check_sample_weight
from ..._loss.glm_distribution import (
ExponentialDispersionModel,
TweedieDistribution,
EDM_DISTRIBUTIONS
)
from .link import (
BaseLink,
IdentityLink,
LogLink,
)
def _safe_lin_pred(X, coef):
"""Compute the linear predictor taking care if intercept is present."""
if coef.size == X.shape[1] + 1:
return X @ coef[1:] + coef[0]
else:
return X @ coef
def _y_pred_deviance_derivative(coef, X, y, weights, family, link):
"""Compute y_pred and the derivative of the deviance w.r.t coef."""
lin_pred = _safe_lin_pred(X, coef)
y_pred = link.inverse(lin_pred)
d1 = link.inverse_derivative(lin_pred)
temp = d1 * family.deviance_derivative(y, y_pred, weights)
if coef.size == X.shape[1] + 1:
devp = np.concatenate(([temp.sum()], temp @ X))
else:
devp = temp @ X # same as X.T @ temp
return y_pred, devp
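# --- Illustrative sketch (added for exposition; not part of the original file). ---
# _safe_lin_pred dispatches on the coefficient length: with an intercept the
# first entry of coef is the bias, otherwise coef is just the weight vector.
def _demo_safe_lin_pred():
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    w = np.array([0.5, -1.0])
    assert np.allclose(_safe_lin_pred(X, w), X @ w)
    w_with_intercept = np.concatenate(([10.0], w))
    assert np.allclose(_safe_lin_pred(X, w_with_intercept), X @ w + 10.0)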
class GeneralizedLinearRegressor(BaseEstimator, RegressorMixin):
"""Regression via a penalized Generalized Linear Model (GLM).
GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at
fitting and predicting the mean of the target y as y_pred=h(X*w).
Therefore, the fit minimizes the following objective function with L2
priors as regularizer::
1/(2*sum(s)) * deviance(y, h(X*w); s)
        + 1/2 * alpha * ||w||_2^2
with inverse link function h and s=sample_weight.
The parameter ``alpha`` corresponds to the lambda parameter in glmnet.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
family : {'normal', 'poisson', 'gamma', 'inverse-gaussian'} \
or an ExponentialDispersionModel instance, default='normal'
The distributional assumption of the GLM, i.e. which distribution from
the EDM, specifies the loss function to be minimized.
link : {'auto', 'identity', 'log'} or an instance of class BaseLink, \
default='auto'
The link function of the GLM, i.e. mapping from linear predictor
`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
the link depending on the chosen family as follows:
- 'identity' for Normal distribution
- 'log' for Poisson, Gamma and Inverse Gaussian distributions
solver : 'lbfgs', default='lbfgs'
Algorithm to use in the optimization problem:
'lbfgs'
Calls scipy's L-BFGS-B optimizer.
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_``.
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
"""
def __init__(self, *, alpha=1.0,
fit_intercept=True, family='normal', link='auto',
solver='lbfgs', max_iter=100, tol=1e-4, warm_start=False,
verbose=0):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.family = family
self.link = link
self.solver = solver
self.max_iter = max_iter
self.tol = tol
self.warm_start = warm_start
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : returns an instance of self.
"""
if isinstance(self.family, ExponentialDispersionModel):
self._family_instance = self.family
elif self.family in EDM_DISTRIBUTIONS:
self._family_instance = EDM_DISTRIBUTIONS[self.family]()
else:
raise ValueError(
"The family must be an instance of class"
" ExponentialDispersionModel or an element of"
" ['normal', 'poisson', 'gamma', 'inverse-gaussian']"
"; got (family={0})".format(self.family))
# Guarantee that self._link_instance is set to an instance of
# class BaseLink
if isinstance(self.link, BaseLink):
self._link_instance = self.link
else:
if self.link == 'auto':
if isinstance(self._family_instance, TweedieDistribution):
if self._family_instance.power <= 0:
self._link_instance = IdentityLink()
if self._family_instance.power >= 1:
self._link_instance = LogLink()
else:
raise ValueError("No default link known for the "
"specified distribution family. Please "
"set link manually, i.e. not to 'auto'; "
"got (link='auto', family={})"
.format(self.family))
elif self.link == 'identity':
self._link_instance = IdentityLink()
elif self.link == 'log':
self._link_instance = LogLink()
else:
raise ValueError(
"The link must be an instance of class Link or "
"an element of ['auto', 'identity', 'log']; "
"got (link={0})".format(self.link))
if not isinstance(self.alpha, numbers.Number) or self.alpha < 0:
raise ValueError("Penalty term must be a non-negative number;"
" got (alpha={0})".format(self.alpha))
if not isinstance(self.fit_intercept, bool):
raise ValueError("The argument fit_intercept must be bool;"
" got {0}".format(self.fit_intercept))
if self.solver not in ['lbfgs']:
raise ValueError("GeneralizedLinearRegressor supports only solvers"
"'lbfgs'; got {0}".format(self.solver))
solver = self.solver
if (not isinstance(self.max_iter, numbers.Integral)
or self.max_iter <= 0):
raise ValueError("Maximum number of iteration must be a positive "
"integer;"
" got (max_iter={0!r})".format(self.max_iter))
if not isinstance(self.tol, numbers.Number) or self.tol <= 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol={0!r})".format(self.tol))
if not isinstance(self.warm_start, bool):
raise ValueError("The argument warm_start must be bool;"
" got {0}".format(self.warm_start))
family = self._family_instance
link = self._link_instance
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr'],
dtype=[np.float64, np.float32],
y_numeric=True, multi_output=False)
weights = _check_sample_weight(sample_weight, X)
_, n_features = X.shape
if not np.all(family.in_y_range(y)):
raise ValueError("Some value(s) of y are out of the valid "
"range for family {0}"
.format(family.__class__.__name__))
# TODO: if alpha=0 check that X is not rank deficient
# rescaling of sample_weight
#
# IMPORTANT NOTE: Since we want to minimize
# 1/(2*sum(sample_weight)) * deviance + L2,
# deviance = sum(sample_weight * unit_deviance),
# we rescale weights such that sum(weights) = 1 and this becomes
# 1/2*deviance + L2 with deviance=sum(weights * unit_deviance)
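# Worked example (hypothetical numbers): sample_weight = [1, 3] has
# sum 4 and is rescaled to weights = [0.25, 0.75], so sum(weights) == 1.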
weights = weights / weights.sum()
if self.warm_start and hasattr(self, 'coef_'):
if self.fit_intercept:
coef = np.concatenate((np.array([self.intercept_]),
self.coef_))
else:
coef = self.coef_
else:
if self.fit_intercept:
coef = np.zeros(n_features+1)
coef[0] = link(np.average(y, weights=weights))
else:
coef = np.zeros(n_features)
# algorithms for optimization
if solver == 'lbfgs':
def func(coef, X, y, weights, alpha, family, link):
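# Descriptive note on the objective computed below (intercept is
# excluded from the penalty): obj(coef) = 0.5 * deviance(y, y_pred)
# + 0.5 * alpha * ||w||_2^2, where w is coef without the intercept entry.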
y_pred, devp = _y_pred_deviance_derivative(
coef, X, y, weights, family, link
)
dev = family.deviance(y, y_pred, weights)
# offset if coef[0] is intercept
offset = 1 if self.fit_intercept else 0
coef_scaled = alpha * coef[offset:]
obj = 0.5 * dev + 0.5 * (coef[offset:] @ coef_scaled)
objp = 0.5 * devp
objp[offset:] += coef_scaled
return obj, objp
args = (X, y, weights, self.alpha, family, link)
opt_res = scipy.optimize.minimize(
func, coef, method="L-BFGS-B", jac=True,
options={
"maxiter": self.max_iter,
"iprint": (self.verbose > 0) - 1,
"gtol": self.tol,
"ftol": 1e3*np.finfo(float).eps,
},
args=args)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
coef = opt_res.x
if self.fit_intercept:
self.intercept_ = coef[0]
self.coef_ = coef[1:]
else:
# set intercept to zero as the other linear models do
self.intercept_ = 0.
self.coef_ = coef
return self
def _linear_predictor(self, X):
"""Compute the linear_predictor = `X @ coef_ + intercept_`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values of linear predictor.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float64, np.float32], ensure_2d=True,
allow_nd=False)
return X @ self.coef_ + self.intercept_
def predict(self, X):
"""Predict using GLM with feature matrix X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values.
"""
# check_array is done in _linear_predictor
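# For example, with a log link the inverse link is exp, so
# y_pred = exp(X @ coef_ + intercept_).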
eta = self._linear_predictor(X)
y_pred = self._link_instance.inverse(eta)
return y_pred
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 deviance. Note that those two are equal
for ``family='normal'``.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
# Note, default score defined in RegressorMixin is R^2 score.
# TODO: make D^2 a score function in module metrics (and thereby get
# input validation and so on)
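# Worked example (hypothetical numbers): dev = 1.2 and dev_null = 6.0
# give D^2 = 1 - 1.2 / 6.0 = 0.8.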
weights = _check_sample_weight(sample_weight, X)
y_pred = self.predict(X)
dev = self._family_instance.deviance(y, y_pred, weights=weights)
y_mean = np.average(y, weights=weights)
dev_null = self._family_instance.deviance(y, y_mean, weights=weights)
return 1 - dev / dev_null
def _more_tags(self):
# create the _family_instance if fit wasn't called yet.
if hasattr(self, '_family_instance'):
_family_instance = self._family_instance
elif isinstance(self.family, ExponentialDispersionModel):
_family_instance = self.family
elif self.family in EDM_DISTRIBUTIONS:
_family_instance = EDM_DISTRIBUTIONS[self.family]()
else:
raise ValueError
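# Example (assuming the EDM implementations, where Poisson support is
# y >= 0): in_y_range(-1.0) is False for Poisson, so the tag
# requires_positive_y becomes True.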
return {"requires_positive_y": not _family_instance.in_y_range(-1.0)}
class PoissonRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Poisson distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.PoissonRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [12, 17, 22, 21]
>>> clf.fit(X, y)
PoissonRegressor()
>>> clf.score(X, y)
0.990...
>>> clf.coef_
array([0.121..., 0.158...])
>>> clf.intercept_
2.088...
>>> clf.predict([[1, 1], [3, 4]])
array([10.676..., 21.875...])
"""
def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
tol=1e-4, warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family="poisson", link='log', max_iter=max_iter,
tol=tol, warm_start=warm_start, verbose=verbose)
@property
def family(self):
# Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
return "poisson"
@family.setter
def family(self, value):
if value != "poisson":
raise ValueError("PoissonRegressor.family must be 'poisson'!")
class GammaRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Gamma distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
Parameters
----------
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.GammaRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [19, 26, 33, 30]
>>> clf.fit(X, y)
GammaRegressor()
>>> clf.score(X, y)
0.773...
>>> clf.coef_
array([0.072..., 0.066...])
>>> clf.intercept_
2.896...
>>> clf.predict([[1, 0], [2, 8]])
array([19.483..., 35.795...])
"""
def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
tol=1e-4, warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family="gamma", link='log', max_iter=max_iter,
tol=tol, warm_start=warm_start, verbose=verbose)
@property
def family(self):
# Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
return "gamma"
@family.setter
def family(self, value):
if value != "gamma":
raise ValueError("GammaRegressor.family must be 'gamma'!")
class TweedieRegressor(GeneralizedLinearRegressor):
"""Generalized Linear Model with a Tweedie distribution.
This estimator can be used to model different GLMs depending on the
``power`` parameter, which determines the underlying distribution.
Read more in the :ref:`User Guide <Generalized_linear_regression>`.
Parameters
----------
power : float, default=0
The power determines the underlying target distribution according
to the following table:
+-------+------------------------+
| Power | Distribution |
+=======+========================+
| 0 | Normal |
+-------+------------------------+
| 1 | Poisson |
+-------+------------------------+
| (1,2) | Compound Poisson Gamma |
+-------+------------------------+
| 2 | Gamma |
+-------+------------------------+
| 3 | Inverse Gaussian |
+-------+------------------------+
For ``0 < power < 1``, no distribution exists.
alpha : float, default=1
Constant that multiplies the penalty term and thus determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
link : {'auto', 'identity', 'log'}, default='auto'
The link function of the GLM, i.e. mapping from linear predictor
`X @ coef + intercept` to prediction `y_pred`. Option 'auto' sets
the link depending on the chosen family as follows:
- 'identity' for Normal distribution
- 'log' for Poisson, Gamma and Inverse Gaussian distributions
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (X @ coef + intercept).
max_iter : int, default=100
The maximal number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.TweedieRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [2, 3.5, 5, 5.5]
>>> clf.fit(X, y)
TweedieRegressor()
>>> clf.score(X, y)
0.839...
>>> clf.coef_
array([0.599..., 0.299...])
>>> clf.intercept_
1.600...
>>> clf.predict([[1, 1], [3, 4]])
array([2.500..., 4.599...])
"""
def __init__(self, *, power=0.0, alpha=1.0, fit_intercept=True,
link='auto', max_iter=100, tol=1e-4,
warm_start=False, verbose=0):
super().__init__(alpha=alpha, fit_intercept=fit_intercept,
family=TweedieDistribution(power=power), link=link,
max_iter=max_iter, tol=tol,
warm_start=warm_start, verbose=verbose)
@property
def family(self):
# We use a property with a setter to make sure that the family is
# always a Tweedie distribution, and that self.power and
# self.family.power are identical by construction.
dist = TweedieDistribution(power=self.power)
# TODO: make the returned object immutable
return dist
@family.setter
def family(self, value):
if isinstance(value, TweedieDistribution):
self.power = value.power
else:
raise TypeError("TweedieRegressor.family must be of type "
"TweedieDistribution!")
|
bnaul/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
Python
|
bsd-3-clause
| 24,806
|
[
"Gaussian"
] |
ec13a2ec31d95b79e8281d7fd3aff31ef637285d383f2a795375a34d7b4fcc52
|
""" ResourceStatus
Module use to switch between the CS and the RSS.
"""
from datetime import datetime, timedelta
import math
from time import sleep
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Utilities.RSSCacheNoThread import RSSCache
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import RssConfiguration
from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import getPoliciesThatApply
from DIRAC.Core.Utilities import DErrno
class ResourceStatus( object ):
"""
ResourceStatus helper that connects to the CS if the RSS flag is not Active. It keeps
the connection to the db / server as an object member, to avoid repeatedly creating
new ones.
"""
__metaclass__ = DIRACSingleton
def __init__( self, rssFlag = None ):
"""
Constructor, initializes the rssClient.
"""
self.log = gLogger.getSubLogger( self.__class__.__name__ )
self.rssConfig = RssConfiguration()
self.__opHelper = Operations()
self.rssClient = ResourceStatusClient()
self.rssFlag = rssFlag
if rssFlag is None:
self.rssFlag = self.__getMode()
# We can set CacheLifetime and CacheHistory from CS, so that we can tune them.
cacheLifeTime = int( self.rssConfig.getConfigCache() )
# RSSCache only affects the calls directed to RSS, if using the CS it is not used.
self.rssCache = RSSCache( cacheLifeTime, self.__updateRssCache )
def getElementStatus( self, elementName, elementType, statusType = None, default = None ):
"""
Helper function, tries to get information from the RSS for the given
Element, otherwise, it gets it from the CS.
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: None, str, list
:param default: default value (meaningful only when rss is InActive)
:type default: str
:return: S_OK/S_ERROR
:rtype: dict
:Example:
>>> getElementStatus('CE42', 'ComputingElement')
S_OK( { 'CE42': { 'all': 'Active' } } )
>>> getElementStatus('SE1', 'StorageElement', 'ReadAccess')
S_OK( { 'SE1': { 'ReadAccess': 'Banned' } } )
>>> getElementStatus('SE1', 'ThisIsAWrongElementType', 'ReadAccess')
S_ERROR( xyz.. )
>>> getElementStatus('ThisIsAWrongName', 'StorageElement', 'WriteAccess')
S_ERROR( xyz.. )
>>> getElementStatus('A_file_catalog', 'FileCatalog')
S_OK( { 'A_file_catalog': { 'all': 'Active' } } )
>>> getElementStatus('SE1', 'StorageElement', ['ReadAccess', 'WriteAccess'])
S_OK( { 'SE1': { 'ReadAccess': 'Banned' , 'WriteAccess': 'Active'} } )
>>> getElementStatus('SE1', 'StorageElement')
S_OK( { 'SE1': { 'ReadAccess': 'Probing' ,
'WriteAccess': 'Active',
'CheckAccess': 'Degraded',
'RemoveAccess': 'Banned'} } )
"""
allowedParameters = ["StorageElement", "ComputingElement", "FTS", "Catalog"]
if elementType not in allowedParameters:
return S_ERROR("%s in not in the list of the allowed parameters: %s" % (elementType, allowedParameters))
# Apply defaults
if not statusType:
if elementType == "StorageElement":
statusType = ['ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess']
elif elementType == "ComputingElement":
statusType = ['all']
elif elementType == "FTS":
statusType = ['all']
elif elementType == "Catalog":
statusType = ['all']
if self.rssFlag:
return self.__getRSSElementStatus( elementName, elementType, statusType )
else:
return self.__getCSElementStatus( elementName, elementType, statusType, default )
def setElementStatus( self, elementName, elementType, statusType, status, reason = None, tokenOwner = None ):
""" Tries set information in RSS and in CS.
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: str
:param reason: reason for setting the status
:type reason: str
:param tokenOwner: owner of the token (meaningful only when rss is Active)
:type tokenOwner: str
:return: S_OK/S_ERROR
:rtype: dict
:Example:
>>> setElementStatus('CE42', 'ComputingElement', 'all', 'Active')
S_OK( xyz.. )
>>> setElementStatus('SE1', 'StorageElement', 'ReadAccess', 'Banned')
S_OK( xyz.. )
"""
if self.rssFlag:
return self.__setRSSElementStatus( elementName, elementType, statusType, status, reason, tokenOwner )
else:
return self.__setCSElementStatus( elementName, elementType, statusType, status )
################################################################################
def __updateRssCache( self ):
""" Method used to update the rssCache.
It will try 5 times to contact the RSS before giving up
"""
meta = { 'columns' : [ 'Name', 'ElementType', 'StatusType', 'Status' ] }
for ti in range( 5 ):
rawCache = self.rssClient.selectStatusElement( 'Resource', 'Status', meta = meta )
if rawCache['OK']:
break
self.log.warn( "Can't get resource's status", rawCache['Message'] + "; trial %d" % ti )
sleep( math.pow( ti, 2 ) )
self.rssClient = ResourceStatusClient()
if not rawCache[ 'OK' ]:
return rawCache
return S_OK( getCacheDictFromRawData( rawCache[ 'Value' ] ) )
################################################################################
def __getRSSElementStatus( self, elementName, elementType, statusType ):
""" Gets from the cache or the RSS the Elements status. The cache is a
copy of the DB table. If it is not on the cache, most likely is not going
to be on the DB.
There is one exception: item just added to the CS, e.g. new Element.
The period between it is added to the DB and the changes are propagated
to the cache will be inconsistent, but not dangerous. Just wait <cacheLifeTime>
minutes.
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement,
otherwise it is 'all' or ['all'])
:type statusType: str, list
"""
cacheMatch = self.rssCache.match( elementName, elementType, statusType )
self.log.debug( '__getRSSElementStatus' )
self.log.debug( cacheMatch )
return cacheMatch
def __getCSElementStatus( self, elementName, elementType, statusType, default ):
""" Gets from the CS the Element status
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: str, list
:param default: default value
:type default: None, str
"""
# DIRAC doesn't store the status of ComputingElements nor FTS in the CS, so here we can just return 'Active'
if elementType in ('ComputingElement', 'FTS'):
return S_OK( { elementName: { 'all': 'Active'} } )
# If we are here it is because elementType is either 'StorageElement' or 'Catalog'
if elementType == 'StorageElement':
cs_path = "/Resources/StorageElements"
elif elementType == 'Catalog':
cs_path = "/Resources/FileCatalogs"
statusType = ['Status']
if not isinstance( elementName, list ):
elementName = [ elementName ]
if not isinstance( statusType, list ):
statusType = [ statusType ]
result = {}
for element in elementName:
for sType in statusType:
# Look in standard location, 'Active' by default
res = gConfig.getValue( "%s/%s/%s" % ( cs_path, element, sType ), 'Active' )
result.setdefault( element, {} )[sType] = res
if result:
return S_OK( result )
if default is not None:
defList = [ [ el, statusType, default ] for el in elementName ]
return S_OK( getDictFromList( defList ) )
_msg = "Element '%s', with statusType '%s' is unknown for CS."
return S_ERROR( DErrno.ERESUNK, _msg % ( elementName, statusType ) )
def __setRSSElementStatus( self, elementName, elementType, statusType, status, reason, tokenOwner ):
"""
Sets on the RSS the Elements status
"""
expiration = datetime.utcnow() + timedelta( days = 1 )
self.rssCache.acquireLock()
try:
res = self.rssClient.addOrModifyStatusElement( 'Resource', 'Status', name = elementName,
elementType = elementType, status = status,
statusType = statusType, reason = reason,
tokenOwner = tokenOwner, tokenExpiration = expiration )
if res[ 'OK' ]:
self.rssCache.refreshCache()
if not res[ 'OK' ]:
_msg = 'Error updating Element (%s,%s,%s)' % ( elementName, statusType, status )
gLogger.warn( 'RSS: %s' % _msg )
return res
finally:
# Release lock, no matter what.
self.rssCache.releaseLock()
def __setCSElementStatus( self, elementName, elementType, statusType, status ):
"""
Sets on the CS the Elements status
"""
# DIRAC doesn't store the status of ComputingElements nor FTS in the CS, so here we can just do nothing
if elementType in ('ComputingElement', 'FTS'):
return S_OK()
# If we are here it is because elementType is either 'StorageElement' or 'Catalog'
statuses = self.rssConfig.getConfigStatusType( elementType )
if statusType not in statuses:
gLogger.error( "%s is not a valid statusType" % statusType )
return S_ERROR( "%s is not a valid statusType: %s" % ( statusType, statuses ) )
if elementType == 'StorageElement':
cs_path = "/Resources/StorageElements"
elif elementType == 'Catalog':
cs_path = "/Resources/FileCatalogs"
#FIXME: This a probably outdated location (new one is in /Operations/[]/Services/Catalogs)
# but needs to be VO-aware
statusType = 'Status'
csAPI = CSAPI()
csAPI.setOption( "%s/%s/%s/%s" % ( cs_path, elementName, elementType, statusType ), status )
res = csAPI.commitChanges()
if not res[ 'OK' ]:
gLogger.warn( 'CS: %s' % res[ 'Message' ] )
return res
def __getMode( self ):
"""
Gets the flag defined (or not) on the RSSConfiguration. If its state is
'Active', we use RSS; if not, we use the CS.
"""
res = self.rssConfig.getConfigState()
if res == 'Active':
if self.rssClient is None:
self.rssClient = ResourceStatusClient()
return True
self.rssClient = None
return False
def isStorageElementAlwaysBanned( self, seName, statusType ):
""" Checks if the AlwaysBanned policy is applied to the SE given as parameter
:param seName : string, name of the SE
:param statusType : ReadAccess, WriteAccess, RemoveAccess, CheckAccess
:returns: S_OK(True/False)
"""
res = getPoliciesThatApply( {'name' : seName, 'statusType' : statusType} )
if not res['OK']:
self.log.error( "isStorageElementAlwaysBanned: unable to get the information", res['Message'] )
return res
isAlwaysBanned = 'AlwaysBanned' in [policy['type'] for policy in res['Value']]
return S_OK( isAlwaysBanned )
################################################################################
def getDictFromList( fromList ):
"""
Auxiliary method that given a list returns a dictionary of dictionaries:
{ site1 : { statusType1 : st1, statusType2 : st2 }, ... }
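Illustrative example (names are hypothetical):
>>> getDictFromList( [ [ 'Site1', 'ReadAccess', 'Active' ],
... [ 'Site1', 'WriteAccess', 'Banned' ] ] )
{'Site1': {'ReadAccess': 'Active', 'WriteAccess': 'Banned'}}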
"""
res = {}
for listElement in fromList:
site, sType, status = listElement
if site not in res:
res[ site ] = {}
res[ site ][ sType ] = status
return res
def getCacheDictFromRawData( rawList ):
"""
Formats the raw data list, which we know must contain tuples of four elements
( element1, element2, element3, element4 ), into a dictionary of tuples with the format
{ ( element1, element2, element3 ) : element4 }.
The resulting dictionary will be the new Cache.
It happens that element1 is elementName,
element2 is elementType,
element3 is statusType,
element4 is status.
:Parameters:
**rawList** - `list`
list of four-element tuples [( element1, element2, element3, element4 ),... ]
:return: dict of the form { ( elementName, elementType, statusType ) : status, ... }
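Illustrative example (names are hypothetical):
>>> getCacheDictFromRawData( [ ( 'SE1', 'StorageElement', 'ReadAccess', 'Active' ) ] )
{('SE1', 'StorageElement', 'ReadAccess'): 'Active'}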
"""
res = {}
for entry in rawList:
res.update( { (entry[0], entry[1], entry[2]) : entry[3] } )
return res
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Andrew-McNab-UK/DIRAC
|
ResourceStatusSystem/Client/ResourceStatus.py
|
Python
|
gpl-3.0
| 13,788
|
[
"DIRAC"
] |
e6dbe818be9f746a3f3ea6e6f5a121f0285bac94cfbd1c1529f94adab5d04160
|
# -*- coding: utf-8 -*-
#
# nistats documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
import sphinx_gallery
# We also add the directory just above to enable local imports of nistats
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
('sphinx.ext.imgmath' # only available for sphinx >= 1.4
if sphinx.version_info[:2] >= (1, 4)
else 'sphinx.ext.pngmath'),
'sphinx.ext.intersphinx',
'numpydoc.numpydoc',
'sphinx_gallery.gen_gallery',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nistats'
copyright = u'The nistats developers 2010-2016'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
import nistats
release = nistats.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
exclude_patterns = ['tune_toc.rst',
'includes/big_toc_css.rst',
'includes/bigger_toc_css.rst',
]
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nistats'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nature.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'oldversion':False, 'collapsiblesidebar': False}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "functional MRI for NeuroImaging"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Nistats'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/nistats-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonScientic'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'nistats.tex', u'functional MRI in python',
"""Bertrand Thirion"""
+ r"\\\relax ~\\\relax http://nistats.github.io",
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/nistats-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\let\oldfootnote\footnote
\def\footnote#1{\oldfootnote{\small #1}}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}',
# Get completely rid of index
'printindex': '',
}
# If false, no module index is generated.
latex_use_modindex = False
latex_domain_indices = False
# Show the page numbers in the references
latex_show_pagerefs = True
# Show URLs in footnotes
latex_show_urls = 'footnote'
trim_doctests_flags = True
_python_doc_base = 'http://docs.python.org/2.7'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
_python_doc_base: None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://matplotlib.org/': None,
'http://scikit-learn.org/stable': None,
'http://nipy.org/nibabel': None,
'http://nilearn.github.io': None,
# add line for nilearn
# add line for patsy
#'http://scikit-image.org/docs/0.8.0/': None,
#'http://docs.enthought.com/mayavi/mayavi/': None,
#'http://statsmodels.sourceforge.net/': None,
#'http://pandas.pydata.org': None,
}
extlinks = {
'simple': (_python_doc_base + '/reference/simple_stmts.html#%s', ''),
'compound': (_python_doc_base + '/reference/compound_stmts.html#%s', ''),
}
sphinx_gallery_conf = {
'doc_module' : 'nistats',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url' : {
'nilearn': 'http://nilearn.github.io',
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.11.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference',
'nibabel': 'http://nipy.org/nibabel',
'sklearn': 'http://scikit-learn.org/stable',
'patsy': 'http://patsy.readthedocs.io/en/latest/',
'pandas': 'http://pandas.pydata.org/pandas-docs/stable/'}
}
# Get rid of spurious warnings due to some interaction between
# autosummary and numpydoc. See
# https://github.com/phn/pytpm/issues/3#issuecomment-12133978 for more
# details
numpydoc_show_class_members = False
def touch_example_backreferences(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples
def setup(app):
app.add_javascript('copybutton.js')
app.connect('autodoc-process-docstring', touch_example_backreferences)
|
bthirion/nistats
|
doc/conf.py
|
Python
|
bsd-3-clause
| 10,477
|
[
"Mayavi"
] |
999674021c26e7d13a29db6105181c02f830041b8534bb347c7d2405d8309899
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from typing import Dict
from kivy.uix.settings import Settings as KivySettings
from kivy.config import Config
from ORCA.definition.Definition import cDefinition
from ORCA.definition.DefinitionPathes import cDefinitionPathes
from ORCA.download.InstalledReps import cInstalledReps
from ORCA.download.RepManagerEntry import cRepManagerEntry
from ORCA.settings.BuildSettingOptionList import BuildSettingOptionListDictVar
from ORCA.settings.BuildSettingOptionList import BuildSettingOptionListVar
from ORCA.settings.setttingtypes.Public import RegisterSettingTypes
from ORCA.utils.FileName import cFileName
from ORCA.utils.LoadFile import LoadFile
from ORCA.utils.TypeConvert import ToStringVersion
from ORCA.vars.Access import GetVar
from ORCA.vars.Helpers import GetVarList
from ORCA.vars.Replace import ReplaceVars
import ORCA.Globals as Globals
__all__ = ['Build_Settings','BuildSettingsStringPowerStatus']
def Build_Settings(oSettings:KivySettings) -> None:
"""
Will be called when showing the settings dialog.
We add a further panel for the orca.ini settings.
If we had a successful init, we build the default full settings view;
otherwise we just build a very minimalistic settings dialog, where the user can just change the ini path.
"""
RegisterSettingTypes(oSettings)
if Globals.bInit:
uOrcaSettingsJSON:str = BuildSettingsString()
oSettings.add_json_panel(u'ORCA', Globals.oOrcaConfigParser, data=uOrcaSettingsJSON)
uBuildSettingsStringDefinitionList:str = BuildSettingsStringDefinitionList()
if uBuildSettingsStringDefinitionList != '':
oSettings.add_json_panel(ReplaceVars('$lvar(580)'),Globals.oDefinitionConfigParser, data=uBuildSettingsStringDefinitionList)
# Add the Info panel
oSettings.add_json_panel(ReplaceVars('$lvar(585)'), Globals.oOrcaConfigParser, data=BuildSettingsStringInfo())
# add the tools settings
# and pass to kivy
oSettings.add_json_panel(ReplaceVars('$lvar(572)'), Globals.oOrcaConfigParser, data=BuildSettingsStringTools())
# add the Online settings
# and pass to kivy
oSettings.add_json_panel(ReplaceVars('$lvar(699)'), Globals.oOrcaConfigParser, data=BuildSettingsStringOnlineResources())
else:
# just build the small settings
oSettings.add_json_panel(u'ORCA', Globals.oOrcaConfigParser, data=BuildSmallSettingsString())
def GetJsonFromSettingFileName(uSettingFileName:str) -> str:
oFnSetting:cFileName = cFileName(Globals.oPathAppReal + "ORCA/settings/settingstrings") + uSettingFileName
if not oFnSetting.Exists():
oFnSetting: cFileName = cFileName(Globals.oPathApp + "ORCA/settings/settingstrings") + uSettingFileName
return ReplaceVars(LoadFile(oFileName=oFnSetting))
def ScanDefinitionNames() -> Dict:
"""
Parses the Definition description to give definition names
"""
uDefinitionName:str
oDefinitionPathes:cDefinitionPathes
oRepManagerEntry:cRepManagerEntry
dDefinitionReps:Dict={}
aHide:List[str] = ["appfavorites_template","cmdfavorites_template","tvfavorites_template","activity_template"]
for uDefinitionName in Globals.aDefinitionList:
if not uDefinitionName in aHide:
if Globals.dDefinitionPathes.get(uDefinitionName) is None:
oDefinitionPathes=cDefinitionPathes(uDefinitionName=uDefinitionName)
Globals.dDefinitionPathes[uDefinitionName]=oDefinitionPathes
oRepManagerEntry=cRepManagerEntry(oFileName=Globals.dDefinitionPathes[uDefinitionName].oFnDefinition)
if oRepManagerEntry.ParseFromXML():
dDefinitionReps[uDefinitionName]=oRepManagerEntry.oRepEntry.uName
return dDefinitionReps
def BuildSettingsString() -> str:
""" Create a Interface list with the used interfaces at the top """
uInterFacename:str
iLast:int
i:int
aInterfaceList:List[str] = []
for uInterFacename in Globals.oInterFaces.dUsedInterfaces:
uInterFacename=ReplaceVars(uInterFacename)
if not uInterFacename in aInterfaceList and uInterFacename!=u'':
aInterfaceList.append(uInterFacename)
for uInterFacename in Globals.oInterFaces.aObjectNameList:
if not uInterFacename in aInterfaceList:
aInterfaceList.append(uInterFacename)
for uInterFacename in aInterfaceList:
if Globals.oInterFaces.GetInterface(uInterFacename) is None:
Globals.oInterFaces.LoadInterface(uInterFacename)
Globals.oInterFaces.CreateJsonSectionList(aObjectList=aInterfaceList)
# put the templates to the end
iLast=len(Globals.aDefinitionList)-1
for i in range(0,len(Globals.aDefinitionList)):
if i>=iLast:
break
if Globals.aDefinitionList[i].endswith(u"_template"):
Globals.aDefinitionList[i],Globals.aDefinitionList[iLast]=Globals.aDefinitionList[iLast],Globals.aDefinitionList[i]
iLast -= 1
dDefinitionReps:Dict=ScanDefinitionNames()
Globals.oScripts.LoadScripts()
BuildSettingOptionListVar(Globals.aLanguageList, "SETTINGS_LANGUAGELIST")
BuildSettingOptionListVar(Globals.oLanguage.oLocales.oLocalesEntries, "SETTINGS_LANGUAGELOCALES")
BuildSettingOptionListVar(Globals.oScripts.aObjectNameList, "SETTINGS_SCRIPTNAMELIST")
BuildSettingOptionListVar(Globals.oScripts.aObjectNameListWithConfig, "SETTINGS_SCRIPTNAMELISTWITHCONFIG")
BuildSettingOptionListVar(aInterfaceList, "SETTINGS_INTERFACENAMELIST")
BuildSettingOptionListVar(Globals.oSound.aSoundsList, "SETTINGS_SOUNDLIST")
BuildSettingOptionListVar(Globals.aSkinList, "SETTINGS_SKINLIST")
BuildSettingOptionListVar(Globals.aTransitionTypes, "SETTINGS_TRANSITIONTYPES")
BuildSettingOptionListVar(Globals.aTransitionDirections, "SETTINGS_TRANSITIONDIRECTIONS")
BuildSettingOptionListDictVar(dDefinitionReps, "SETTINGS_DEFINITIONLIST")
return GetJsonFromSettingFileName("setting_orca.txt")
def BuildSettingsStringDefinitionList() -> str:
""" Build the settings for the ORCA DefinitionList """
uMainSetting:str = u''
uSubSetting:str = u''
uOrcaSettingsJSON:str = u''
uPublicTitle:str
oDef:cDefinition
iStart:int
# aDefinitionListSortedTitle:List[cDefinition] = sorted(Globals.oDefinitions, key=lambda entry: entry.uDefPublicTitle)
aDefinitions:List[cDefinition] = []
for uDefKey in Globals.oDefinitions:
aDefinitions.append(Globals.oDefinitions[uDefKey])
aDefinitionListSorted:List[cDefinition] = sorted(aDefinitions, key=lambda entry: entry.uDefPublicTitle)
for oDef in aDefinitionListSorted:
if len(oDef.dDefinitionSettingsJSON)>0:
uPublicTitle = oDef.uDefPublicTitle
iStart = uPublicTitle.find( '[' )
if iStart != -1 :
uPublicTitle = uPublicTitle[:iStart]
if oDef==Globals.oDefinitions[0]:
uMainSetting= u'{"type": "buttons","title": "%s","desc": "%s","section": "ORCA","key": "button_changedefinitionsetting","buttons":[{"title":"$lvar(716)","id":"button_%s"}]}' %(uPublicTitle,oDef.uDefDescription,oDef.uAlias)
else:
uSubSetting+= u'{"type": "buttons","title": "%s","desc": "%s","section": "ORCA","key": "button_changedefinitionsetting","buttons":[{"title":"$lvar(716)","id":"button_%s"}]},' %(uPublicTitle,oDef.uDefDescription,oDef.uAlias )
if uMainSetting != u'':
uOrcaSettingsJSON =u'[{ "type": "title","title": "$lvar(717)" },\n %s]' % uMainSetting
if uSubSetting!=u'':
uOrcaSettingsJSON = u'%s,{ "type": "title","title": "$lvar(718)" },\n %s]' % (uOrcaSettingsJSON[:-1],uSubSetting[:-1])
else:
if uSubSetting!=u'':
uOrcaSettingsJSON = u'[{ "type": "title","title": "$lvar(718)" },\n %s]' % (uSubSetting[:-1])
uOrcaSettingsJSON=uOrcaSettingsJSON.replace("'","\'")
uOrcaSettingsJSON=ReplaceVars(uOrcaSettingsJSON)
return uOrcaSettingsJSON
def BuildSettingsStringInfo() -> str:
""" Build the settings for the ORCA Info panel """
return GetJsonFromSettingFileName("setting_info.txt")
def BuildSettingsStringTools() -> str:
""" Build the settings for the ORCA tools """
uOrcaSettingsJSON =u'[{ "type": "title","title": "$lvar(573)" },\n' \
u'{"type": "buttons","title": "$lvar(574)","desc": "$lvar(575)","section": "ORCA","key": "button_clear_atlas","buttons":[{"title":"$lvar(576)","id":"button_clear_atlas"}]},\n' \
u'{ "type": "title","title": "$lvar(633)" },\n' \
u'{"type": "buttons","title": "$lvar(720)","desc": "$lvar(721)","section": "ORCA","key": "button_discover_results","buttons":[{"title":"$lvar(722)","id":"button_discover_results"}]},\n' \
u'{"type": "buttons","title": "$lvar(760)","desc": "$lvar(761)","section": "ORCA","key": "button_discover_rediscover","buttons":[{"title":"$lvar(722)","id":"button_discover_rediscover"},{"title":"$lvar(729)","id":"button_discover_rediscover_force"}]}]'
uOrcaSettingsJSON=AddScriptSetting(uSettingName="ORCA",uSettingPage=ReplaceVars("$lvar(572)"),uOrcaSettingsJSON=uOrcaSettingsJSON)
uOrcaSettingsJSON=ReplaceVars(uOrcaSettingsJSON)
return uOrcaSettingsJSON
def BuildSettingsStringOnlineResources() -> str:
""" Build the settings for the ORCA Online Resource """
iCountBlanks:int = 0
i:int
uReps:str = ''
uKey:str
oInstalledRep: cInstalledReps
aSubList: List[cInstalledReps]
for i in range(Globals.iCntRepositories):
if Globals.aRepositories[i]=='':
iCountBlanks+=1
if iCountBlanks>1:
continue
uReps+=u'{"type": "string","title": "$lvar(671)","desc": "$lvar(672)","section": "ORCA","key": "repository%d"},\n' % i
uOrcaSettingsJSON =u'[{ "type": "title","title": "$lvar(670)" },\n' \
'%s' \
u'{ "type": "title","title": "$lvar(680)" },\n' \
u'{"type": "buttons","title": "$lvar(681)","desc": "$lvar(682)","section": "ORCA","key": "button_getonline","buttons":[{"title":"$lvar(678)","id":"button_getonline"}]}' \
u']' % uReps
if len(Globals.dInstalledReps)>0:
uOrcaSettingsJSON=uOrcaSettingsJSON[:-1]
aSubList = []
for (uKey,oInstalledRep) in Globals.dInstalledReps.items():
aSubList.append(oInstalledRep)
aSubList.sort(key = lambda x: x.uType)
uOldType:str = u''
uOrcaSettingsJSON+=u',{ "type": "title","title": "$lvar(679)" },\n'
for oInstalledRep in aSubList:
if uOldType!=oInstalledRep.uType:
uOldType=oInstalledRep.uType
uName="???"
for tTup in Globals.aRepNames:
if tTup[1]==oInstalledRep.uType:
uName=tTup[0]
uOrcaSettingsJSON+=u'{ "type": "title","title": "-> %s" },\n' % uName
uOrcaSettingsJSON+=u'{"type": "buttons","title": "%s","desc": "$lvar(751): %s","section": "ORCA","key": "button_installed_reps","buttons":[{"title":"$lvar(752)","id":"button_updaterep:%s:%s"}]},\n' % (oInstalledRep.uName,ToStringVersion(oInstalledRep.iVersion),oInstalledRep.uType,oInstalledRep.uName)
uOrcaSettingsJSON=uOrcaSettingsJSON[:-2]
uOrcaSettingsJSON+=u']'
uOrcaSettingsJSON=uOrcaSettingsJSON.replace("'","\'")
uOrcaSettingsJSON=AddScriptSetting(uSettingName="TOOLS",uSettingPage=ReplaceVars("$lvar(699)"),uOrcaSettingsJSON=uOrcaSettingsJSON)
uOrcaSettingsJSON=ReplaceVars(uOrcaSettingsJSON)
return uOrcaSettingsJSON
def BuildSmallSettingsString() -> str:
""" just build the small settings """
uOrcaSettingsJSON=u'[{ "type": "title","title": "Initialisation" },\n' \
u'{"type": "path","title": "Path to Orca Files","desc": "Sets the file root path for Orca files (Definitions, etc)","section": "ORCA","key": "rootpath"}\n' \
u']'
return uOrcaSettingsJSON
def BuildSettingsStringPowerStatus() -> str:
""" Build the settings for the Power Stati """
uSection:str
uVarNameKey:str
uIndexGroup:str
uActivityGroupName:str
uActivityName:str
oConfig:Config
aPowerListDevices:List[str] = []
aPowerListActivities:List[str] = []
iLeftBracketPos:int
iRightBracketPos:int
uPowerStatusJSON:str =u'['
aPowerList=sorted(GetVarList(uFilter = "POWERSTATUS"))
for uKey in aPowerList:
if uKey.startswith("POWERSTATUS_"):
aPowerListDevices.append(uKey)
for uKey in aPowerList:
if u"ACTIVITY_POWERSTATUS[" in uKey:
aPowerListActivities.append(uKey)
uSection = Globals.uDefinitionName
uSection = uSection.replace(u' ', u'_')
oConfig=Globals.oDefinitionConfigParser
if len(aPowerListDevices):
uPowerStatusJSON+=ReplaceVars(u'{ "type": "title","title": "$lvar(2001)" },\n')
for uVarNameKey in aPowerListDevices:
uPowerStatusJSON+= u'{"type": "bool","title": "%s","desc": "","section": "%s","key": "powerstatus_%s"},\n' %(uVarNameKey,uSection,uVarNameKey.lower())
if GetVar(uVarName = uVarNameKey)=="ON":
oConfig.set(uSection, "powerstatus_"+uVarNameKey.lower(), "1")
else:
oConfig.set(uSection, "powerstatus_"+uVarNameKey.lower(), "0")
if len(aPowerListActivities):
uPowerStatusJSON+=ReplaceVars(u'{ "type": "title","title": "$lvar(2000)" },\n')
for uVarNameKey in aPowerListActivities:
iLeftBracketPos=uVarNameKey.find('[')
if iLeftBracketPos != -1:
iRightBracketPos = uVarNameKey.find(']',iLeftBracketPos)
if iRightBracketPos != -1:
uIndexGroup=uVarNameKey[iLeftBracketPos+1:iRightBracketPos]
uActivityGroupName=GetVar(uVarName = "ACTIVITYGROUPNAME["+uIndexGroup+"]")
uActivityName=GetVar(uVarName = "ACTIVITY_NAME"+uVarNameKey[iLeftBracketPos:])
if uActivityName:
uPowerStatusJSON+= u'{"type": "bool","title": "%s %s","desc": "%s","section": "%s","key": "powerstatus_%s"},\n' %(uActivityGroupName,uActivityName,uVarNameKey,uSection,uVarNameKey.lower())
if GetVar(uVarName = uVarNameKey)=="ON":
oConfig.set(uSection, "powerstatus_"+uVarNameKey.lower(), "1")
else:
oConfig.set(uSection, "powerstatus_"+uVarNameKey.lower(), "0")
if len(uPowerStatusJSON)>1:
uPowerStatusJSON=uPowerStatusJSON[:-2]
uPowerStatusJSON+=u']'
else:
uPowerStatusJSON=u'[]'
oConfig.write()
return uPowerStatusJSON
def AddScriptSetting(uSettingName:str, uSettingPage:str, uOrcaSettingsJSON:str) -> str:
dTitleSettings = {}
uTmp = u","
for uScripKey in Globals.oScripts.dScriptSettingPlugins:
oScriptSettingPlugins = Globals.oScripts.dScriptSettingPlugins[uScripKey]
if oScriptSettingPlugins.uSettingName==uSettingName and ReplaceVars(oScriptSettingPlugins.uSettingPage)==uSettingPage:
uSettingTitle = ReplaceVars(oScriptSettingPlugins.uSettingTitle)
if dTitleSettings.get(uSettingTitle) is None:
dTitleSettings[uSettingTitle]=[u'{ "type": "title","title": "%s" }' % oScriptSettingPlugins.uSettingTitle]
for uSettingJson in oScriptSettingPlugins.aSettingJson:
dTitleSettings[uSettingTitle].append(uSettingJson)
if len(dTitleSettings)>0:
for uKey in dTitleSettings:
for uLine in dTitleSettings[uKey]:
uTmp=uTmp+uLine+",\n"
uOrcaSettingsJSON=uOrcaSettingsJSON[:-1]+uTmp[:-2]+u"]"
return uOrcaSettingsJSON
|
thica/ORCA-Remote
|
src/ORCA/settings/AppSettings.py
|
Python
|
gpl-3.0
| 17,609
|
[
"ORCA"
] |
c35f30b76bcdbc2fa99707ecb104a3e6dcb7bf223ffe341d4ab8277fc43f147e
|
#!/bin/env python
"""Script to call the DataRecoveryAgent functionality by hand."""
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base import Script
__RCSID__ = '$Id$'
class Params(object):
"""Collection of Parameters set via CLI switches."""
def __init__(self):
self.enabled = False
self.transID = 0
def setEnabled(self, _):
self.enabled = True
return S_OK()
def setTransID(self, transID):
self.transID = int(transID)
return S_OK()
def registerSwitches(self):
Script.registerSwitch('T:', 'TransID=', 'TransID to Check/Fix', self.setTransID)
Script.registerSwitch('X', 'Enabled', 'Enable the changes', self.setEnabled)
Script.setUsageMessage('\n'.join([__doc__,
'\nUsage:',
' %s [option|cfgfile] ...\n' % Script.scriptName]))
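# Example invocation (the transformation ID is hypothetical):
#   dirac-transformation-recover-data --TransID=1234 --Enabled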
if __name__ == '__main__':
PARAMS = Params()
PARAMS.registerSwitches()
Script.parseCommandLine(ignoreErrors=False)
# Create Data Recovery Agent and run over single transformation.
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.DataRecoveryAgent import DataRecoveryAgent
DRA = DataRecoveryAgent('Transformation/DataRecoveryAgent', 'Transformation/DataRecoveryAgent')
DRA.jobStatus = ['Done', 'Failed']
DRA.enabled = PARAMS.enabled
TRANSFORMATION = TransformationClient().getTransformations(condDict={'TransformationID': PARAMS.transID})
if not TRANSFORMATION['OK']:
gLogger.error('Failed to find transformation: %s' % TRANSFORMATION['Message'])
exit(1)
if not TRANSFORMATION['Value']:
gLogger.error('Did not find any transformations')
exit(1)
TRANS_INFO_DICT = TRANSFORMATION['Value'][0]
TRANS_INFO_DICT.pop('Body', None)
gLogger.notice('Found transformation: %s' % TRANS_INFO_DICT)
DRA.treatTransformation(PARAMS.transID, TRANS_INFO_DICT)
exit(0)
|
andresailer/DIRAC
|
TransformationSystem/scripts/dirac-transformation-recover-data.py
|
Python
|
gpl-3.0
| 1,938
|
[
"DIRAC"
] |
bd9917537e0122ad8c1b8521989035d002345e672dbc30732bd5f54dd7f2e13f
|
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 lcc & Robert Jerome
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portuguese version by Duarte Loreto <happyguy_pt@hotmail.com>, 2007.
# Based on the Spanish version by Julio Sanchez <julio.sanchez@gmail.com>
"""
Specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from gprime.lib import Person
import gprime.relationship
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
_level_name_male = [ "", "primeiro", "segundo", "terceiro", "quarto", "quinto",
"sexto", "sétimo", "oitavo", "nono", "décimo", "décimo-primeiro",
"décimo-segundo", "décimo-terceiro", "décimo-quarto", "décimo-quinto",
"décimo-sexto", "décimo-sétimo", "décimo-oitavo", "décimo-nono",
"vigésimo"]
# Short forms (in apocope) used before names
_level_name_male_a = [ "", "primeiro", "segundo", "terceiro", "quarto", "quinto",
"sexto", "sétimo", "oitavo", "nono", "décimo", "décimo-primeiro",
"décimo-segundo", "décimo-terceiro", "décimo-quarto", "décimo-quinto",
"décimo-sexto", "décimo-sétimo", "décimo-oitavo", "décimo-nono",
"vigésimo"]
_level_name_female = [ "", "primeira", "segunda", "terceira", "quarta", "quinta",
"sexta", "sétima", "oitava", "nona", "décima", "décima-primeira",
"décima-segunda", "décima-terceira", "décima-quarta", "décima-quinta",
"décima-sexta", "décima-sétima", "décima-oitava", "décima-nona",
"vigésima"]
_level_name_plural = [ "", "primeiros", "segundos", "terceiros", "quartos",
"quintos", "sextos", "sétimos", "oitavos", "nonos",
"décimos", "décimos-primeiros", "décimos-segundos", "décimos-terceiros",
"décimos-quartos", "décimos-quintos", "décimos-sextos",
"décimos-sétimos", "décimos-oitavos", "décimos-nonos",
"vigésimos"]
# This plugin tries to be flexible and expect little from the following
# tables. Ancestors are named from the list for the first generations.
# When this list is not enough, ordinals are used based on the same idea,
# i.e. bisavô is 'segundo avô' and so on, that has been the
# traditional way in Portuguese. When we run out of ordinals we resort to
# Nº notation, that is sort of understandable if in context.
# There is a specificity for pt_BR where they can use "tataravô" instead
# of "tetravô", being both forms correct for pt_BR but just "tetravô"
# correct for pt. Translation keeps "tetravô".
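# Illustrative fallback behaviour (levels are hypothetical):
#   get_male_cousin(2)  -> "segundo primo"  (from the ordinal table)
#   get_male_cousin(25) -> "25º primo"      (beyond the table, Nº notation)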
_parents_level = [ "", "pais", "avós", "bisavós", "trisavós",
"tetravós", "pentavós", "hexavós", "heptavós", "octavós"]
_father_level = [ "", "pai", "avô", "bisavô", "trisavô",
"tetravô", "pentavô", "hexavô", "heptavô", "octavô"]
_mother_level = [ "", "mãe", "avó", "bisavó", "trisavó",
"tetravó", "pentavó", "hexavó", "heptavó", "octovó"]
# Higher-order terms (after "tetravô") are not standard in Portuguese.
# Check http://www.geneall.net/P/forum_msg.php?id=136774 that states
# that although some people may use other greek-prefixed forms for
# higher levels, both pt and pt_BR correct form is to use, after
# "tetravô", the "quinto avô", "sexto avô", etc.
_son_level = [ "", "filho", "neto", "bisneto",
"trineto", "tetraneto", "pentaneto", "hexaneto", "heptaneto", "octaneto"]
_daughter_level = [ "", "filha", "neta", "bisneta",
"trineta", "tetraneta", "pentaneta", "hexaneta", "heptaneta", "octaneta"]
_sister_level = [ "", "irmã", "tia", "tia avó", "tia bisavó", "tia trisavó", "tia tetravó",
"tia pentavó", "tia hexavó", "tia heptavó", "tia octovó"]
_brother_level = [ "", "irmão", "tio", "tio avô", "tio bisavô", "tio trisavô",
"tio tetravô", "tio pentavô", "tio hexavô", "tio heptavô", "tio octavô"]
_nephew_level = [ "", "sobrinho", "sobrinho neto", "sobrinho bisneto", "sobrinho trineto",
"sobrinho tetraneto", "sobrinho pentaneto", "sobrinho hexaneto",
"sobrinho heptaneto", "sobrinho octaneto"]
_niece_level = [ "", "sobrinha", "sobrinha neta", "sobrinha bisneta", "sobrinha trineta",
"sobrinha tetraneta", "sobrinha pentaneta", "sobrinha hexaneta",
"sobrinha heptaneta", "sobrinha octaneta"]
# Relatório de Parentesco
_PARENTS_LEVEL = ["", "pais", "avós", "bisavós", "tetravós",
"pentavós", "hexavós", "heptavós", "octavós"]
_CHILDREN_LEVEL = ["", "filhos", "netos", "bisnetos", "trinetos",
"tetranetos", "pentanetos", "hexanetos", "heptanetos"
"octanetos"]
_SIBLINGS_LEVEL = ["", "irmãos e irmãs", "tios e tias","tios avôs e tias avós",
"tios bisavôs e tias bisavós", "tios trisavôs e tias trisavós",
"tios tetravôs e tias tetravós", "tios pentavôs e tias pentavós",
"tios hexavôs e tias hexavós", "tios heptavôs e tias heptavós"
"tios octavôs e tias octavós"]
_NEPHEWS_NIECES_LEVEL = ["", "sobrinhos e sobrinhas",
"sobrinhos netos e sobrinhas netas",
"sobrinhos bisnetos e sobrinhas bisnetas",
"sobrinhos trinetos e sobrinhas trinetas"
"sobrinhos tetranetos e sobrinhas tetranetas"
"sobrinhos pentanetos e sobrinhas pentanetas"
"sobrinhos hexanetos e sobrinhas hexanetas"
"sobrinhos heptanetos e sobrinhas heptanetas"
"sobrinhos octanetos e sobrinhas octanetas"
]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gprime.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
        gprime.relationship.RelationshipCalculator.__init__(self)
def get_male_cousin(self, level):
if level < len(_level_name_male):
return "%s primo" % (_level_name_male[level])
else:
return "%dº primo" % level
def get_female_cousin(self, level):
if level < len(_level_name_female):
return "%s prima" % (_level_name_female[level])
else:
return "%dª prima" % level
def get_distant_uncle(self, level):
if level < len(_level_name_male):
return "%s tio" % (_level_name_male[level])
else:
return "%dº tio" % level
def get_distant_aunt(self, level):
if level < len(_level_name_female):
return "%s tia" % (_level_name_female[level])
else:
return "%dª tia" % level
def get_distant_nephew(self, level):
if level < len(_level_name_male):
return "%s sobrinho" % (_level_name_male[level])
else:
return "%dº sobrinho" % level
def get_distant_niece(self, level):
if level < len(_level_name_female):
return "%s sobrinha" % (_level_name_female[level])
else:
return "%dª sobrinha" % level
def get_male_relative(self, level1, level2):
if level1 < len(_level_name_male_a):
level1_str = _level_name_male_a[level1]
else:
level1_str = "%dº" % level1
if level2 < len(_level_name_male_a):
level2_str = _level_name_male_a[level2]
else:
level2_str = "%dº" % level2
level = level1 + level2
if level < len(_level_name_male_a):
level_str = _level_name_male_a[level]
else:
level_str = "%dº" % level
return "parente em %s grau (%s com %s)" % (level_str, level1_str, level2_str)
def get_female_relative(self, level1, level2):
return self.get_male_relative(level1, level2)
def get_parents(self, level):
if level < len(_parents_level):
return _parents_level[level]
elif (level-1) < len(_level_name_plural):
return "%s avós" % (_level_name_plural[level-1])
else:
return "%dº avós" % (level-1)
def get_father(self, level):
if level < len(_father_level):
return _father_level[level]
elif (level-1) < len(_level_name_male_a):
return "%s avô" % (_level_name_male_a[level-1])
else:
return "%dº avô" % (level-1)
def get_son(self, level):
if level < len(_son_level):
return _son_level[level]
elif (level-1) < len(_level_name_male_a):
return "%s neto" % (_level_name_male_a[level-1])
else:
return "%dº neto" % (level-1)
def get_mother(self, level):
if level < len(_mother_level):
return _mother_level[level]
elif (level-1)<len(_level_name_female):
return "%s avó" % (_level_name_female[level-1])
else:
return "%dª avó" % (level-1)
def get_daughter(self, level):
if level < len(_daughter_level):
return _daughter_level[level]
elif (level-1) < len(_level_name_female):
return "%s neta" % (_level_name_female[level-1])
else:
return "%dª neta" % (level-1)
def get_aunt(self, level):
if level < len(_sister_level):
return _sister_level[level]
elif (level-2) < len(_level_name_female):
return "%s tia avó" % (_level_name_female[level-2])
else:
return "%dª tia avó" % (level-2)
def get_uncle(self, level):
if level < len(_brother_level):
return _brother_level[level]
elif (level-2) < len(_level_name_male_a):
return "%s tio avô" % (_level_name_male_a[level-2])
else:
return "%dº tio avô" % (level-2)
def get_nephew(self, level):
if level < len(_nephew_level):
return _nephew_level[level]
elif (level-1) < len(_level_name_male_a):
return "%s sobrinho neto" % (_level_name_male_a[level-1])
else:
return "%dº sobrinho neto" % (level-1)
def get_niece(self, level):
if level < len(_niece_level):
return _niece_level[level]
elif (level-1) < len(_level_name_female):
return "%s sobrinha neta" % (_level_name_female[level-1])
else:
return "%dª sobrinha neta" % (level-1)
def get_relationship(self, secondRel, firstRel, orig_person_gender, other_person_gender):
"""
returns a string representing the relationshp between the two people,
along with a list of common ancestors (typically father, mother)
"""
common = ""
if firstRel == 0:
if secondRel == 0:
return ('', common)
elif other_person_gender == Person.MALE:
return (self.get_father(secondRel), common)
else:
return (self.get_mother(secondRel), common)
elif secondRel == 0:
if other_person_gender == Person.MALE:
return (self.get_son(firstRel), common)
else:
return (self.get_daughter(firstRel), common)
elif firstRel == 1:
if other_person_gender == Person.MALE:
return (self.get_uncle(secondRel), common)
else:
return (self.get_aunt(secondRel), common)
elif secondRel == 1:
if other_person_gender == Person.MALE:
return (self.get_nephew(firstRel-1), common)
else:
return (self.get_niece(firstRel-1), common)
elif firstRel == secondRel == 2:
if other_person_gender == Person.MALE:
return ('primo irmão', common)
else:
return ('prima irmã', common)
elif firstRel == secondRel:
if other_person_gender == Person.MALE:
return (self.get_male_cousin(firstRel-1), common)
else:
return (self.get_female_cousin(firstRel-1), common)
elif firstRel == secondRel+1:
if other_person_gender == Person.MALE:
return (self.get_distant_nephew(secondRel), common)
else:
return (self.get_distant_niece(secondRel), common)
elif firstRel+1 == secondRel:
if other_person_gender == Person.MALE:
return (self.get_distant_uncle(firstRel), common)
else:
return (self.get_distant_aunt(firstRel), common)
else:
if other_person_gender == Person.MALE:
return (self.get_male_relative(firstRel, secondRel), common)
else:
return (self.get_female_relative(firstRel, secondRel), common)
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
        return self.get_relationship(Ga, Gb, gender_a, gender_b)[0]
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
        return self.get_relationship(1, 1, gender_a, gender_b)[0]
# Relatório de Parentesco
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
"""
Cria o objeto KinshipReport que produz o relatório.
Os argumentos são:
database - a instância do banco de dados GRAMPS
options_class - instância da classe das opções para este relatório
O presente relatório tem os seguintes parâmetros (variáveis de classe)
que entram na classe de opções.
maxdescend - Máximo gerações de descendentes para incluir.
maxascend - Máximo de gerações ancestrais para incluir.
incspouses - Se deseja incluir cônjuges.
inccousins - Se deseja incluir primos.
incaunts - Se deseja incluir tios / sobrinhos.
pid - A identificação Gramps da pessoa central para o relatório.
Preenche um mapa das matrizes contendo os descendentes
da pessoa falecida. Esta função chama a si mesma recursivamente até
atingir max_descend.
Parâmetros:
:param person_handle: o identificador da próxima pessoa
:param Ga: O número de gerações, desde a pessoa principal até o
ancestral comum. É incrementado quando subir as gerações, e
deixado inalterado quando descer as gerações.
:param Gb: O número de gerações desta pessoa (person_handle) até o
ancestral comum. É incrementado quando descer as
gerações and posto a zero quando subir as gerações.
:param skip_handle: Identificador opcional para pular quando descer.
Isso é útil para pular o descendente que trouxe
essa generação em primeiro lugar.
Preenche um mapa das matrizes contendo os ancestrais
da pessoa falecida. Esta função chama a si mesma recursivamente até
atingir max_ascend.
Parâmetros:
:param person_handle: o identificador da próxima pessoa
:param Ga: O número de gerações, desde a pessoa principal até o
ancestral comum. É incrementado quando subir as gerações, e
deixado inalterado quando descer as gerações.
:param Gb: O número de gerações desta pessoa (person_handle) até o
ancestral comum. É incrementado quando descer as
gerações and posto a zero quando subir as gerações.
"""
rel_str = "???"
if Ga == 0:
# These are descendants
if Gb < len(_CHILDREN_LEVEL):
rel_str = _CHILDREN_LEVEL[Gb]
else:
rel_str = "descendentes"
elif Gb == 0:
# These are parents/grand parents
if Ga < len(_PARENTS_LEVEL):
rel_str = _PARENTS_LEVEL[Ga]
else:
rel_str = "ancestrais"
elif Gb == 1:
# These are siblings/aunts/uncles
if Ga < len(_SIBLINGS_LEVEL):
rel_str = _SIBLINGS_LEVEL[Ga]
else:
rel_str = "filhos dos ancestrais"
elif Ga == 1:
# These are nieces/nephews
if Gb < len(_NEPHEWS_NIECES_LEVEL):
rel_str = _NEPHEWS_NIECES_LEVEL[Gb - 1]
else:
rel_str = "sobrinhos sobrinhas"
elif Ga > 1 and Ga == Gb:
# These are cousins in the same generation
if Ga == 2:
rel_str = "primos e primas"
elif Ga <= len(_level_name_plural):
rel_str = "%s primos e primas" % _level_name_plural[Ga -
2]
else:
# security
rel_str = "primos e primas"
        if in_law_b:
rel_str = "cônjuges dos %s" % rel_str
return rel_str
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_pt.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gprime.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
sam-m888/gprime
|
gprime/plugins/rel/rel_pt.py
|
Python
|
gpl-2.0
| 19,304
|
[
"Brian"
] |
c9f2ea7e94c2af90138c80487e8df99262c0f77c30607b934817823a497a9a9f
|
#!/usr/bin/env python
import pysam
import sys
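# Usage (illustrative): AddCoverage.py <regions.txt|stdin> <alignments.bam> <out.txt>
# Each input line holds whitespace-delimited chrom, start, end in its first three
# fields; the read count at the interval midpoint is appended to each line.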
inFileName = sys.argv[1]
bamFile = pysam.Samfile(sys.argv[2])
outFile = open(sys.argv[3], 'w')
if (inFileName == "stdin"):
inFile = sys.stdin
else:
inFile = open(inFileName)
for line in inFile:
    region = line.split()
    # Count reads overlapping the midpoint of the [start, end) interval.
    midpoint = (int(region[1]) + int(region[2])) // 2
    sup = bamFile.count(region[0], midpoint - 1, midpoint)
    outFile.write(line.strip() + "\t" + str(sup) + "\n")
outFile.close()
|
yunlongliukm/chm1_scripts
|
one_off/AddCoverage.py
|
Python
|
mit
| 481
|
[
"pysam"
] |
869cd8904f1a18b09f691e6ca27dfebc8a1f93cabe32843818ba4b0943a061bb
|
#!/usr/bin/env python
import sys
import netCDF4 as nc
import numpy as np
import time
from tvtk.api import tvtk
from tvtk.tools import mlab
from tvtk.common import configure_input_data
def normalise_lons(lons):
"""
Make a copy of lons that is between -180 and 180.
"""
lons_n = np.copy(lons)
lons_n[lons_n > 180] = lons_n[lons_n > 180] - 360
lons_n[lons_n < -180] = lons_n[lons_n < -180] + 360
return lons_n
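# Worked example (illustrative): normalise_lons(np.array([190.0, -190.0]))
# returns array([-170., 170.]).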
def main():
with nc.Dataset('grid.nc') as f:
lats = f.variables['geolat_c'][:] * (np.pi / 180.0)
lons = normalise_lons(f.variables['geolon_c'][:]) * (np.pi / 180.0)
with nc.Dataset('ocean_mask.nc') as f:
ocean_mask = f.variables['mask'][:]
with nc.Dataset('ocean.nc') as f:
u = f.variables['u'][0, 0, :, :]
v = f.variables['v'][0, 0, :, :]
u = u[::2, ::2]
v = v[::2, ::2]
u[u.mask] = 0.
v[v.mask] = 0.
    # Speed is the magnitude of the (u, v) velocity vector.
    speed = np.sqrt(u**2 + v**2)
#speed[speed.mask] = np.NaN
speed = np.ravel(speed)
# Reduce resolution in order to speed things up.
lats = lats[::2, ::2]
lons = lons[::2, ::2]
x = np.cos(lats) * np.cos(lons)
y = np.sin(lats)
z = -(np.cos(lats) * np.sin(lons))
    assert(x.shape == y.shape == z.shape)
points = np.empty((x.shape[0]*x.shape[1], 3))
points[:, 0] = np.ravel(x)
points[:, 1] = np.ravel(y)
points[:, 2] = np.ravel(z)
indxs = np.empty((x.shape[0] * x.shape[1]), dtype='int32')
indxs[:] = range(x.shape[0] * x.shape[1])
indxs = indxs.reshape((x.shape[0], x.shape[1]))
# Set up connectivity as indices to the points
cells = np.empty((x.shape[0]-1, x.shape[1], 4), dtype='int32')
cells[:] = -1
cells[:, :, 0] = indxs[:-1, :]
cells[:, :, 1] = indxs[1:,:]
cells[:, :-1, 2] = indxs[1:,1:]
    cells[:, :-1, 3] = indxs[:-1,1:]
# Fix up the final column, left out above
cells[:, -1, 2] = indxs[1:, 0]
cells[:, -1, 3] = indxs[:-1, 0]
# Check that there's no -1's left.
assert(np.min(cells) == 0)
    # FIXME: fix up the NH
# Put into VTK shape
cells = cells.reshape((cells.shape[0]*cells.shape[1], 4))
### TVTK PIPELINE
# create LUT
lut = tvtk.LookupTable()
lut.scale = 'log10'
lut.table_range = np.min(speed), np.max(speed)
lut.hue_range = 0.5, 0.95
lut.saturation_range = 0.5, 0.5
lut.value_range = 0.75, 0.75
#lut.nan_color = 0, 0, 0, 0
lut.build()
# create a renderer
renderer = tvtk.Renderer()
# create a render window and hand it the renderer
render_window = tvtk.RenderWindow(size=(800,800))
render_window.add_renderer(renderer)
# create interactor and hand it the render window
# This handles mouse interaction with window.
interactor = tvtk.RenderWindowInteractor(render_window=render_window)
# Create a mesh from the data created above.
mesh = tvtk.PolyData(points=points, polys=cells)
mesh.point_data.scalars = speed
mesh.point_data.scalars.name = 'Speed'
# Set the mapper to scale temperature range
# across the entire range of colors
mapper = tvtk.PolyDataMapper(lookup_table=lut)
configure_input_data(mapper, mesh)
actor = tvtk.Actor(mapper=mapper)
# Now add the actors to the renderer and start the interaction.
renderer.add_actor(actor)
interactor.initialize()
interactor.start()
if __name__ == '__main__':
sys.exit(main())
|
nicjhan/mom-particles
|
vtk/velocity.py
|
Python
|
gpl-2.0
| 3,433
|
[
"VTK"
] |
5ad7434a4a3d482c5be5eace9e897f09fac1de16def56d26e614e2357226a4be
|
# -*- coding: utf-8 -*-
"""
pysteps.timeseries.autoregression
=================================
Methods related to autoregressive AR(p) models.
.. autosummary::
:toctree: ../generated/
adjust_lag2_corrcoef1
adjust_lag2_corrcoef2
ar_acf
estimate_ar_params_ols
estimate_ar_params_ols_localized
estimate_ar_params_yw
estimate_ar_params_yw_localized
estimate_var_params_ols
estimate_var_params_ols_localized
estimate_var_params_yw
iterate_ar_model
iterate_var_model
"""
import numpy as np
from scipy.special import binom
from scipy import linalg as la
from scipy import ndimage
def adjust_lag2_corrcoef1(gamma_1, gamma_2):
"""
A simple adjustment of lag-2 temporal autocorrelation coefficient to
ensure that the resulting AR(2) process is stationary when the parameters
are estimated from the Yule-Walker equations.
Parameters
----------
    gamma_1: float
        Lag-1 temporal autocorrelation coefficient.
    gamma_2: float
        Lag-2 temporal autocorrelation coefficient.
Returns
-------
out: float
The adjusted lag-2 correlation coefficient.
"""
gamma_2 = np.maximum(gamma_2, 2 * gamma_1 * gamma_1 - 1 + 1e-10)
gamma_2 = np.minimum(gamma_2, 1 - 1e-10)
return gamma_2
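# Worked example (illustrative): with gamma_1 = 0.9 the stationarity bound is
# 2 * 0.9**2 - 1 = 0.62, so adjust_lag2_corrcoef1(0.9, 0.5) raises the lag-2
# coefficient to approximately 0.62.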
def adjust_lag2_corrcoef2(gamma_1, gamma_2):
"""
A more advanced adjustment of lag-2 temporal autocorrelation coefficient
to ensure that the resulting AR(2) process is stationary when
the parameters are estimated from the Yule-Walker equations.
Parameters
----------
    gamma_1: float
        Lag-1 temporal autocorrelation coefficient.
    gamma_2: float
        Lag-2 temporal autocorrelation coefficient.
Returns
-------
out: float
The adjusted lag-2 correlation coefficient.
"""
gamma_2 = np.maximum(gamma_2, 2 * gamma_1 * gamma_2 - 1)
gamma_2 = np.maximum(
gamma_2, (3 * gamma_1**2 - 2 + 2 * (1 - gamma_1**2) ** 1.5) / gamma_1**2
)
return gamma_2
def ar_acf(gamma, n=None):
"""
Compute theoretical autocorrelation function (ACF) from the AR(p) model
with lag-l, l=1,2,...,p temporal autocorrelation coefficients.
Parameters
----------
gamma: array-like
Array of length p containing the lag-l, l=1,2,...p, temporal
autocorrelation coefficients.
The correlation coefficients are assumed to be in ascending
order with respect to time lag.
n: int
Desired length of ACF array. Must be greater than len(gamma).
Returns
-------
out: array-like
Array containing the ACF values.
"""
ar_order = len(gamma)
if n == ar_order or n is None:
return gamma
elif n < ar_order:
raise ValueError(
"n=%i, but must be larger than the order of the AR process %i"
% (n, ar_order)
)
phi = estimate_ar_params_yw(gamma)[:-1]
acf = gamma.copy()
for t in range(0, n - ar_order):
# Retrieve gammas (in reverse order)
gammas = acf[t : t + ar_order][::-1]
# Compute next gamma
gamma_ = np.sum(gammas * phi)
acf.append(gamma_)
return acf
def estimate_ar_params_ols(
x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
r"""
Estimate the parameters of an autoregressive AR(p) model
:math:`x_{k+1}=c+\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`
by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
are estimated for a d times differenced time series that is integrated back
to the original one by summation of the differences.
Parameters
----------
x: array_like
Array of shape (n,...) containing a time series of length n=p+d+h+1.
The remaining dimensions are flattened. The rows and columns of x
represent time steps and samples, respectively.
p: int
The order of the model.
d: {0,1}
The order of differencing to apply to the time series.
check_stationarity: bool
Check the stationarity of the estimated model.
include_constant_term: bool
Include the constant term :math:`c` to the model.
h: int
If h>0, the fitting is done by using a history of length h in addition
to the minimal required number of time steps n=p+d+1.
lam: float
If lam>0, the regression is regularized by adding a penalty term
(i.e. ridge regression).
Returns
-------
out: list
The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
\dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
constant term :math:`c` is added to the beginning of the list.
Notes
-----
Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to zero.
"""
n = x.shape[0]
if n != p + d + h + 1:
raise ValueError(
"n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
% (n, p, d, h, p + d + h + 1)
)
if len(x.shape) > 1:
x = x.reshape((n, np.prod(x.shape[1:])))
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
if d == 1:
x = np.diff(x, axis=0)
n -= d
x_lhs = x[p:, :]
Z = []
for i in range(x.shape[1]):
for j in range(p - 1, n - 1 - h):
z_ = np.hstack([x[j - k, i] for k in range(p)])
if include_constant_term:
z_ = np.hstack([[1], z_])
Z.append(z_)
Z = np.column_stack(Z)
b = np.dot(
np.dot(x_lhs, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0]))
)
b = b.flatten()
if include_constant_term:
c = b[0]
phi = list(b[1:])
else:
phi = list(b)
if p == 1:
phi_pert = np.sqrt(1.0 - phi[0] * phi[0])
elif p == 2:
phi_pert = np.sqrt(
(1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 - phi[1])
)
else:
phi_pert = 0.0
if check_stationarity:
if not test_ar_stationarity(phi):
raise RuntimeError(
"Error in estimate_ar_params_yw: " "nonstationary AR(p) process"
)
if d == 1:
phi_out = _compute_differenced_model_params(phi, p, 1, 1)
else:
phi_out = phi
phi_out.append(phi_pert)
if include_constant_term:
phi_out.insert(0, c)
return phi_out
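# Illustrative sketch (not part of the library API): fit an AR(1) model by OLS
# from a minimal time series of length n = p+d+h+1 = 2 with two samples.
# >>> x = np.array([[1.0, 0.5], [0.8, 0.4]])
# >>> estimate_ar_params_ols(x, p=1)   # -> [0.8, 0.6]: lag-1 weight, innovation scale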
def estimate_ar_params_ols_localized(
x,
p,
window_radius,
d=0,
include_constant_term=False,
h=0,
lam=0.0,
window="gaussian",
):
r"""
Estimate the parameters of a localized AR(p) model
:math:`x_{k+1,i}=c_i+\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1,i}\epsilon`
by using ordinary least squares (OLS), where :math:`i` denote spatial
coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
are estimated for a d times differenced time series that is integrated back
to the original one by summation of the differences.
Parameters
----------
x: array_like
Array of shape (n,...) containing a time series of length n=p+d+h+1.
The remaining dimensions are flattened. The rows and columns of x
represent time steps and samples, respectively.
p: int
The order of the model.
window_radius: float
Radius of the moving window. If window is 'gaussian', window_radius is
the standard deviation of the Gaussian filter. If window is 'uniform',
the size of the window is 2*window_radius+1.
d: {0,1}
The order of differencing to apply to the time series.
include_constant_term: bool
Include the constant term :math:`c_i` to the model.
h: int
If h>0, the fitting is done by using a history of length h in addition
to the minimal required number of time steps n=p+d+1.
lam: float
If lam>0, the regression is regularized by adding a penalty term
(i.e. ridge regression).
window: {"gaussian", "uniform"}
The weight function to use for the moving window. Applicable if
window_radius < np.inf. Defaults to 'gaussian'.
Returns
-------
out: list
        List of length p+1 containing the AR(p) parameter fields for the
        lag-p terms and the innovation term. The parameter fields have the
        same shape as x.shape[1:]. NaN values are assigned where the
sample size for estimating the parameters is too small. If
include_constant_term is True, the constant term :math:`c_i` is added
to the beginning of the list.
Notes
-----
Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to a zero array.
"""
n = x.shape[0]
if n != p + d + h + 1:
raise ValueError(
"n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
% (n, p, d, h, p + d + h + 1)
)
if d == 1:
x = np.diff(x, axis=0)
n -= d
if window == "gaussian":
convol_filter = ndimage.gaussian_filter
else:
convol_filter = ndimage.uniform_filter
if window == "uniform":
window_size = 2 * window_radius + 1
else:
window_size = window_radius
XZ = np.zeros(np.hstack([[p], x.shape[1:]]))
for i in range(p):
for j in range(h + 1):
tmp = convol_filter(
x[p + j, :] * x[p - 1 - i + j, :], window_size, mode="constant"
)
XZ[i, :] += tmp
if include_constant_term:
v = 0.0
for i in range(h + 1):
v += convol_filter(x[p + i, :], window_size, mode="constant")
XZ = np.vstack([v[np.newaxis, :], XZ])
if not include_constant_term:
Z2 = np.zeros(np.hstack([[p, p], x.shape[1:]]))
for i in range(p):
for j in range(p):
for k in range(h + 1):
tmp = convol_filter(
x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
window_size,
mode="constant",
)
Z2[i, j, :] += tmp
else:
Z2 = np.zeros(np.hstack([[p + 1, p + 1], x.shape[1:]]))
Z2[0, 0, :] = convol_filter(np.ones(x.shape[1:]), window_size, mode="constant")
for i in range(p):
for j in range(h + 1):
tmp = convol_filter(x[p - 1 - i + j, :], window_size, mode="constant")
Z2[0, i + 1, :] += tmp
Z2[i + 1, 0, :] += tmp
for i in range(p):
for j in range(p):
for k in range(h + 1):
tmp = convol_filter(
x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
window_size,
mode="constant",
)
Z2[i + 1, j + 1, :] += tmp
m = np.prod(x.shape[1:])
phi = np.empty(np.hstack([[p], m]))
if include_constant_term:
c = np.empty(m)
XZ = XZ.reshape(np.hstack([[XZ.shape[0]], m]))
Z2 = Z2.reshape(np.hstack([[Z2.shape[0], Z2.shape[1]], m]))
for i in range(m):
try:
b = np.dot(XZ[:, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0])))
if not include_constant_term:
phi[:, i] = b
else:
phi[:, i] = b[1:]
c[i] = b[0]
except np.linalg.LinAlgError:
phi[:, i] = np.nan
if include_constant_term:
c[i] = np.nan
if p == 1:
phi_pert = np.sqrt(1.0 - phi[0, :] * phi[0, :])
elif p == 2:
phi_pert = np.sqrt(
(1.0 + phi[1, :])
* ((1.0 - phi[1, :]) ** 2.0 - phi[0, :] ** 2.0)
/ (1.0 - phi[1, :])
)
else:
phi_pert = np.zeros(m)
phi = list(phi.reshape(np.hstack([[phi.shape[0]], x.shape[1:]])))
if d == 1:
phi = _compute_differenced_model_params(phi, p, 1, 1)
phi.append(phi_pert.reshape(x.shape[1:]))
if include_constant_term:
phi.insert(0, c.reshape(x.shape[1:]))
return phi
def estimate_ar_params_yw(gamma, d=0, check_stationarity=True):
r"""
Estimate the parameters of an AR(p) model
:math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`
from the Yule-Walker equations using the given set of autocorrelation
coefficients.
Parameters
----------
gamma: array_like
Array of length p containing the lag-l temporal autocorrelation
coefficients for l=1,2,...p. The correlation coefficients are assumed
to be in ascending order with respect to time lag.
d: {0,1}
The order of differencing. If d=1, the correlation coefficients gamma
are assumed to be computed from the differenced time series, which is
also done for the resulting parameter estimates.
check_stationarity: bool
If True, the stationarity of the resulting VAR(p) process is tested. An
exception is thrown if the process is not stationary.
Returns
-------
out: ndarray
        Array of length p+1 containing the AR(p) parameters for the
lag-p terms and the innovation term.
Notes
-----
To estimate the parameters of an integrated ARI(p,d) model, compute the
correlation coefficients gamma by calling
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0.
"""
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
p = len(gamma)
g = np.hstack([[1.0], gamma])
G = []
for j in range(p):
G.append(np.roll(g[:-1], j))
G = np.array(G)
phi = np.linalg.solve(G, g[1:].flatten())
# Check that the absolute values of the roots of the characteristic
# polynomial are less than one.
# Otherwise the AR(p) model is not stationary.
if check_stationarity:
if not test_ar_stationarity(phi):
raise RuntimeError(
"Error in estimate_ar_params_yw: " "nonstationary AR(p) process"
)
c = 1.0
for j in range(p):
c -= gamma[j] * phi[j]
phi_pert = np.sqrt(c)
# If the expression inside the square root is negative, phi_pert cannot
# be computed and it is set to zero instead.
if not np.isfinite(phi_pert):
phi_pert = 0.0
if d == 1:
phi = _compute_differenced_model_params(phi, p, 1, 1)
phi_out = np.empty(len(phi) + 1)
phi_out[: len(phi)] = phi
phi_out[-1] = phi_pert
return phi_out
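# Illustrative sketch: Yule-Walker fit of an AR(2) model from assumed lag-1
# and lag-2 autocorrelation coefficients.
# >>> gamma = np.array([0.9, 0.8])
# >>> phi = estimate_ar_params_yw(gamma)   # array of length p+1 = 3;
# >>> # phi[:2] are the lag weights, phi[2] scales the innovation term.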
def estimate_ar_params_yw_localized(gamma, d=0):
r"""
Estimate the parameters of a localized AR(p) model
:math:`x_{k+1,i}=\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1}\epsilon`
from the Yule-Walker equations using the given set of autocorrelation
coefficients :math`\gamma_{l,i}`, where :math`l` denotes time lag and
:math:`i` denote spatial coordinates with arbitrary dimension.
Parameters
----------
gamma: array_like
A list containing the lag-l temporal autocorrelation coefficient fields
for l=1,2,...p. The correlation coefficients are assumed to be in
ascending order with respect to time lag.
d: {0,1}
The order of differencing. If d=1, the correlation coefficients gamma
are assumed to be computed from the differenced time series, which is
also done for the resulting parameter estimates.
Returns
-------
out: list
        List of length p+1 containing the AR(p) parameter fields for the
lag-p terms and the innovation term. The parameter fields have the same
shape as the elements of gamma.
Notes
-----
To estimate the parameters of an integrated ARI(p,d) model, compute the
correlation coefficients gamma by calling
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0
and window_radius<np.inf.
"""
for i in range(1, len(gamma)):
if gamma[i].shape != gamma[0].shape:
raise ValueError(
"the correlation coefficient fields gamma have mismatching shapes"
)
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
p = len(gamma)
n = np.prod(gamma[0].shape)
gamma_1d = [gamma[i].flatten() for i in range(len(gamma))]
phi = np.empty((p, n))
for i in range(n):
g = np.hstack([[1.0], [gamma_1d[k][i] for k in range(len(gamma_1d))]])
G = []
for k in range(p):
G.append(np.roll(g[:-1], k))
G = np.array(G)
try:
phi_ = np.linalg.solve(G, g[1:].flatten())
except np.linalg.LinAlgError:
phi_ = np.ones(p) * np.nan
phi[:, i] = phi_
c = 1.0
for i in range(p):
c -= gamma_1d[i] * phi[i]
phi_pert = np.sqrt(c)
if d == 1:
phi = _compute_differenced_model_params(phi, p, 1, 1)
phi_out = np.empty((len(phi) + 1, n))
phi_out[: len(phi), :] = phi
phi_out[-1, :] = phi_pert
return list(phi_out.reshape(np.hstack([[len(phi_out)], gamma[0].shape])))
def estimate_var_params_ols(
x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
r"""
Estimate the parameters of a vector autoregressive VAR(p) model
:math:`\mathbf{x}_{k+1}=\mathbf{c}+\mathbf{\Phi}_1\mathbf{x}_k+
\mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
\mathbf{\Phi}_{p+1}\mathbf{\epsilon}`
by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
are estimated for a d times differenced time series that is integrated back
to the original one by summation of the differences.
Parameters
----------
x: array_like
Array of shape (n, q, :) containing a time series of length n=p+d+h+1
with q-dimensional variables. The remaining dimensions are flattened.
The remaining dimensions starting from the third one represent the
samples.
p: int
The order of the model.
d: {0,1}
The order of differencing to apply to the time series.
check_stationarity: bool
If True, the stationarity of the resulting VAR(p) process is tested. An
exception is thrown if the process is not stationary.
include_constant_term: bool
Include the constant term :math:`\mathbf{c}` to the model.
h: int
If h>0, the fitting is done by using a history of length h in addition
to the minimal required number of time steps n=p+d+1.
lam: float
If lam>0, the regression is regularized by adding a penalty term
(i.e. ridge regression).
Returns
-------
out: list
The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
\dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
constant term :math:`\mathbf{c}` is added to the beginning of the list.
Notes
-----
Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
currently implemented, and it is set to a zero matrix.
"""
q = x.shape[1]
n = x.shape[0]
if n != p + d + h + 1:
raise ValueError(
"n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
% (n, p, d, h, p + d + h + 1)
)
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
if d == 1:
x = np.diff(x, axis=0)
n -= d
x = x.reshape((n, q, np.prod(x.shape[2:])))
X = []
for i in range(x.shape[2]):
for j in range(p + h, n):
x_ = x[j, :, i]
X.append(x_.reshape((q, 1)))
X = np.hstack(X)
Z = []
for i in range(x.shape[2]):
for j in range(p - 1, n - 1 - h):
z_ = np.vstack([x[j - k, :, i].reshape((q, 1)) for k in range(p)])
if include_constant_term:
z_ = np.vstack([[1], z_])
Z.append(z_)
Z = np.column_stack(Z)
B = np.dot(np.dot(X, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0])))
phi = []
if include_constant_term:
c = B[:, 0]
for i in range(p):
phi.append(B[:, i * q + 1 : (i + 1) * q + 1])
else:
for i in range(p):
phi.append(B[:, i * q : (i + 1) * q])
if check_stationarity:
M = np.zeros((p * q, p * q))
for i in range(p):
M[0:q, i * q : (i + 1) * q] = phi[i]
for i in range(1, p):
M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
r, v = np.linalg.eig(M)
if np.any(np.abs(r) > 0.999):
raise RuntimeError(
"Error in estimate_var_params_ols: " "nonstationary VAR(p) process"
)
if d == 1:
phi = _compute_differenced_model_params(phi, p, q, 1)
if include_constant_term:
phi.insert(0, c)
phi.append(np.zeros((q, q)))
return phi
def estimate_var_params_ols_localized(
x,
p,
window_radius,
d=0,
include_constant_term=False,
h=0,
lam=0.0,
window="gaussian",
):
r"""
Estimate the parameters of a vector autoregressive VAR(p) model
:math:`\mathbf{x}_{k+1,i}=\mathbf{c}_i+\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
\mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
\mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`
by using ordinary least squares (OLS), where :math:`i` denote spatial
coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
are estimated for a d times differenced time series that is integrated back
to the original one by summation of the differences.
Parameters
----------
x: array_like
Array of shape (n, q, :) containing a time series of length n=p+d+h+1
with q-dimensional variables. The remaining dimensions are flattened.
The remaining dimensions starting from the third one represent the
samples.
p: int
The order of the model.
window_radius: float
Radius of the moving window. If window is 'gaussian', window_radius is
the standard deviation of the Gaussian filter. If window is 'uniform',
the size of the window is 2*window_radius+1.
d: {0,1}
The order of differencing to apply to the time series.
include_constant_term: bool
Include the constant term :math:`\mathbf{c}` to the model.
h: int
If h>0, the fitting is done by using a history of length h in addition
to the minimal required number of time steps n=p+d+1.
lam: float
If lam>0, the regression is regularized by adding a penalty term
(i.e. ridge regression).
window: {"gaussian", "uniform"}
The weight function to use for the moving window. Applicable if
window_radius < np.inf. Defaults to 'gaussian'.
Returns
-------
out: list
The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
\mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. If
include_constant_term is True, the constant term :math:`\mathbf{c}_i` is
added to the beginning of the list. Each element of the list is a matrix
of shape (x.shape[2:], q, q).
Notes
-----
Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
currently implemented, and it is set to a zero matrix.
"""
q = x.shape[1]
n = x.shape[0]
if n != p + d + h + 1:
raise ValueError(
"n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
% (n, p, d, h, p + d + h + 1)
)
if d == 1:
x = np.diff(x, axis=0)
n -= d
if window == "gaussian":
convol_filter = ndimage.gaussian_filter
else:
convol_filter = ndimage.uniform_filter
if window == "uniform":
window_size = 2 * window_radius + 1
else:
window_size = window_radius
XZ = np.zeros(np.hstack([[q, p * q], x.shape[2:]]))
for i in range(q):
for k in range(p):
for j in range(q):
for l in range(h + 1):
tmp = convol_filter(
x[p + l, i, :] * x[p - 1 - k + l, j, :],
window_size,
mode="constant",
)
XZ[i, k * q + j, :] += tmp
if include_constant_term:
v = np.zeros(np.hstack([[q], x.shape[2:]]))
for i in range(q):
for j in range(h + 1):
v[i, :] += convol_filter(x[p + j, i, :], window_size, mode="constant")
XZ = np.hstack([v[:, np.newaxis, :], XZ])
if not include_constant_term:
Z2 = np.zeros(np.hstack([[p * q, p * q], x.shape[2:]]))
for i in range(p):
for j in range(q):
for k in range(p):
for l in range(q):
for m in range(h + 1):
tmp = convol_filter(
x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
window_size,
mode="constant",
)
Z2[i * q + j, k * q + l, :] += tmp
else:
Z2 = np.zeros(np.hstack([[p * q + 1, p * q + 1], x.shape[2:]]))
Z2[0, 0, :] = convol_filter(np.ones(x.shape[2:]), window_size, mode="constant")
for i in range(p):
for j in range(q):
for k in range(h + 1):
tmp = convol_filter(
x[p - 1 - i + k, j, :], window_size, mode="constant"
)
Z2[0, i * q + j + 1, :] += tmp
Z2[i * q + j + 1, 0, :] += tmp
for i in range(p):
for j in range(q):
for k in range(p):
for l in range(q):
for m in range(h + 1):
tmp = convol_filter(
x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
window_size,
mode="constant",
)
Z2[i * q + j + 1, k * q + l + 1, :] += tmp
m = np.prod(x.shape[2:])
if include_constant_term:
c = np.empty((m, q))
XZ = XZ.reshape((XZ.shape[0], XZ.shape[1], m))
Z2 = Z2.reshape((Z2.shape[0], Z2.shape[1], m))
phi = np.empty((p, m, q, q))
for i in range(m):
try:
B = np.dot(
XZ[:, :, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0]))
)
for k in range(p):
if not include_constant_term:
phi[k, i, :, :] = B[:, k * q : (k + 1) * q]
else:
phi[k, i, :, :] = B[:, k * q + 1 : (k + 1) * q + 1]
if include_constant_term:
c[i, :] = B[:, 0]
except np.linalg.LinAlgError:
phi[:, i, :, :] = np.nan
if include_constant_term:
c[i, :] = np.nan
phi_out = [
phi[i].reshape(np.hstack([x.shape[2:], [q, q]])) for i in range(len(phi))
]
if d == 1:
phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
phi_out.append(np.zeros(phi_out[0].shape))
if include_constant_term:
phi_out.insert(0, c.reshape(np.hstack([x.shape[2:], [q]])))
return phi_out
def estimate_var_params_yw(gamma, d=0, check_stationarity=True):
r"""
Estimate the parameters of a VAR(p) model
:math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+
\mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
\mathbf{\Phi}_{p+1}\mathbf{\epsilon}`
from the Yule-Walker equations using the given correlation matrices
:math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`, where
n=p.
Parameters
----------
gamma: list
List of correlation matrices
:math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`.
To obtain these matrices, use
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
with window_radius=np.inf.
d: {0,1}
The order of differencing. If d=1, the correlation coefficients gamma
are assumed to be computed from the differenced time series, which is
also done for the resulting parameter estimates.
check_stationarity: bool
If True, the stationarity of the resulting VAR(p) process is tested. An
exception is thrown if the process is not stationary.
Returns
-------
out: list
List of VAR(p) coefficient matrices :math:`\mathbf{\Phi}_1,
\mathbf{\Phi}_2,\dots\mathbf{\Phi}_{p+1}`, where the last matrix
corresponds to the innovation term.
Notes
-----
To estimate the parameters of an integrated VARI(p,d) model, compute the
correlation coefficients gamma by calling
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
with d>0. Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}`
is not currently implemented, and it is set to a zero matrix.
"""
p = len(gamma) - 1
q = gamma[0].shape[0]
for i in range(len(gamma)):
if gamma[i].shape[0] != q or gamma[i].shape[1] != q:
raise ValueError(
"dimension mismatch: gamma[%d].shape=%s, but (%d,%d) expected"
% (i, str(gamma[i].shape), q, q)
)
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
a = np.empty((p * q, p * q))
for i in range(p):
for j in range(p):
a_tmp = gamma[abs(i - j)]
if i > j:
a_tmp = a_tmp.T
a[i * q : (i + 1) * q, j * q : (j + 1) * q] = a_tmp
b = np.vstack([gamma[i].T for i in range(1, p + 1)])
x = np.linalg.solve(a, b)
phi = []
for i in range(p):
phi.append(x[i * q : (i + 1) * q, :])
if check_stationarity:
if not test_var_stationarity(phi):
raise RuntimeError(
"Error in estimate_var_params_yw: " "nonstationary VAR(p) process"
)
if d == 1:
phi = _compute_differenced_model_params(phi, p, q, 1)
phi.append(np.zeros(phi[0].shape))
return phi
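# Illustrative sketch: VAR(1) Yule-Walker fit from assumed correlation matrices
# Gamma_0 (identity) and Gamma_1.
# >>> gamma = [np.eye(2), 0.5 * np.eye(2)]
# >>> phi = estimate_var_params_yw(gamma)
# >>> # phi[0] is approximately 0.5*I; phi[1] is the (zero) innovation matrix.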
def estimate_var_params_yw_localized(gamma, d=0):
r"""
Estimate the parameters of a vector autoregressive VAR(p) model
:math:`\mathbf{x}_{k+1,i}=\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
\mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
\mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`
from the Yule-Walker equations by using the given correlation matrices,
where :math:`i` denote spatial coordinates with arbitrary dimension.
Parameters
----------
gamma: list
List of correlation matrices
:math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`.
To obtain these matrices, use
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
with window_radius<np.inf.
d: {0,1}
The order of differencing. If d=1, the correlation coefficients gamma
are assumed to be computed from the differenced time series, which is
also done for the resulting parameter estimates.
Returns
-------
out: list
The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
\mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. Each element of the
list has the same shape as those in gamma.
Notes
-----
To estimate the parameters of an integrated VARI(p,d) model, compute the
correlation coefficients gamma by calling
:py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
with d>0 and window_radius<np.inf. Estimation of the innovation parameter
:math:`\mathbf{\Phi}_{p+1}` is not currently implemented, and it is set to
a zero matrix.
"""
p = len(gamma) - 1
q = gamma[0].shape[2]
n = np.prod(gamma[0].shape[:-2])
for i in range(1, len(gamma)):
if gamma[i].shape != gamma[0].shape:
raise ValueError(
"dimension mismatch: gamma[%d].shape=%s, but %s expected"
% (i, str(gamma[i].shape), str(gamma[0].shape))
)
if d not in [0, 1]:
raise ValueError("d = %d, but 0 or 1 required" % d)
gamma_1d = [g.reshape((n, q, q)) for g in gamma]
phi_out = [np.zeros([n, q, q]) for i in range(p)]
for k in range(n):
a = np.empty((p * q, p * q))
for i in range(p):
for j in range(p):
a_tmp = gamma_1d[abs(i - j)][k, :]
if i > j:
a_tmp = a_tmp.T
a[i * q : (i + 1) * q, j * q : (j + 1) * q] = a_tmp
b = np.vstack([gamma_1d[i][k, :].T for i in range(1, p + 1)])
x = np.linalg.solve(a, b)
for i in range(p):
phi_out[i][k, :, :] = x[i * q : (i + 1) * q, :]
for i in range(len(phi_out)):
phi_out[i] = phi_out[i].reshape(np.hstack([gamma[0].shape[:-2], [q, q]]))
if d == 1:
phi_out = _compute_differenced_model_params(phi_out, p, 1, 1)
phi_out.append(np.zeros(gamma[0].shape))
return phi_out
def iterate_ar_model(x, phi, eps=None):
r"""Apply an AR(p) model
:math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`
to a time series :math:`x_k`.
Parameters
----------
x: array_like
        Array of shape (n,...), n>=p, containing a time series of an input variable
x. The elements of x along the first dimension are assumed to be in
ascending order by time, and the time intervals are assumed to be regular.
phi: list
List or array of length p+1 specifying the parameters of the AR(p) model.
The parameters are in ascending order by increasing time lag, and the
last element is the parameter corresponding to the innovation term eps.
eps: array_like
Optional innovation term for the AR(p) process. The shape of eps is
expected to be a scalar or x.shape[1:] if len(x.shape)>1. If eps is
None, the innovation term is not added.
"""
if x.shape[0] < len(phi) - 1:
raise ValueError(
"dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
% (x.shape[0], len(phi))
)
if len(x.shape) == 1:
x_simple_shape = True
x = x[:, np.newaxis]
else:
x_simple_shape = False
if eps is not None and eps.shape != x.shape[1:]:
raise ValueError(
"dimension mismatch between x and eps: x.shape=%s, eps.shape[1:]=%s"
% (str(x.shape), str(eps.shape[1:]))
)
x_new = 0.0
p = len(phi) - 1
for i in range(p):
x_new += phi[i] * x[-(i + 1), :]
if eps is not None:
x_new += phi[-1] * eps
if x_simple_shape:
return np.hstack([x[1:], [x_new]])
else:
return np.concatenate([x[1:, :], x_new[np.newaxis, :]])
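# Illustrative sketch: advance an AR(1) process by one step without an
# innovation term; the returned window holds the two most recent values.
# >>> x = np.array([0.0, 1.0])      # oldest first
# >>> phi = [0.5, 0.1]              # lag-1 weight and innovation scale
# >>> x = iterate_ar_model(x, phi)  # new last value is 0.5 * 1.0 = 0.5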
def iterate_var_model(x, phi, eps=None):
r"""Apply a VAR(p) model
:math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+\mathbf{\Phi}_2
\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
\mathbf{\Phi}_{p+1}\mathbf{\epsilon}`
to a q-variate time series :math:`\mathbf{x}_k`.
Parameters
----------
x: array_like
        Array of shape (n,q,...), n>=p, containing a q-variate time series of an
input variable x. The elements of x along the first dimension are
assumed to be in ascending order by time, and the time intervals are
assumed to be regular.
phi: list
List of parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,\dots,
\mathbf{\Phi}_{p+1}`.
eps: array_like
Optional innovation term for the AR(p) process. The shape of eps is
expected to be (x.shape[1],) or (x.shape[1],x.shape[2:]) if
len(x.shape)>2. If eps is None, the innovation term is not added.
"""
if x.shape[0] < len(phi) - 1:
raise ValueError(
"dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
            % (x.shape[0], len(phi))
)
phi_shape = phi[0].shape
if phi_shape[-1] != phi_shape[-2]:
raise ValueError(
"phi[0].shape = %s, but the last two dimensions are expected to be equal"
% str(phi_shape)
)
for i in range(1, len(phi)):
if phi[i].shape != phi_shape:
raise ValueError("dimension mismatch between parameter matrices phi")
if len(x.shape) == 2:
x_simple_shape = True
x = x[:, :, np.newaxis]
else:
x_simple_shape = False
x_new = np.zeros(x.shape[1:])
p = len(phi) - 1
for l in range(p):
x_new += np.einsum("...ij,j...->i...", phi[l], x[-(l + 1), :])
if eps is not None:
x_new += np.dot(np.dot(phi[-1], phi[-1]), eps)
if x_simple_shape:
return np.vstack([x[1:, :, 0], x_new[:, 0]])
else:
x_new = x_new.reshape(x.shape[1:])
return np.concatenate([x[1:, :], x_new[np.newaxis, :, :]], axis=0)
def test_ar_stationarity(phi):
r"""
Test stationarity of an AR(p) process. That is, test that the roots of
the equation :math:`x^p-\phi_1*x^{p-1}-\dots-\phi_p` lie inside the unit
circle.
Parameters
----------
phi: list
List of AR(p) parameters :math:`\phi_1,\phi_2,\dots,\phi_p`.
Returns
-------
out: bool
True/False if the process is/is not stationary.
"""
r = np.array(
[
np.abs(r_)
for r_ in np.roots([1.0 if i == 0 else -phi[i] for i in range(len(phi))])
]
)
return False if np.any(r >= 1) else True
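# Worked example (illustrative): test_ar_stationarity([0.5, 0.3]) returns True,
# since the roots of x**2 - 0.5*x - 0.3 (about 0.85 and -0.35) lie inside the
# unit circle.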
def test_var_stationarity(phi):
r"""
    Test stationarity of a VAR(p) process. That is, test that the moduli of
the eigenvalues of the companion matrix lie inside the unit circle.
Parameters
----------
phi: list
List of VAR(p) parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
\dots,\mathbf{\Phi}_p`.
Returns
-------
out: bool
True/False if the process is/is not stationary.
"""
q = phi[0].shape
for i in range(1, len(phi)):
if phi[i].shape != q:
raise ValueError("dimension mismatch between parameter matrices phi")
p = len(phi)
q = phi[0].shape[0]
M = np.zeros((p * q, p * q))
for i in range(p):
M[0:q, i * q : (i + 1) * q] = phi[i]
for i in range(1, p):
M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
r = np.linalg.eig(M)[0]
return False if np.any(np.abs(r) >= 1) else True
def _compute_differenced_model_params(phi, p, q, d):
phi_out = []
for i in range(p + d):
if q > 1:
if len(phi[0].shape) == 2:
phi_out.append(np.zeros((q, q)))
else:
phi_out.append(np.zeros(phi[0].shape))
else:
phi_out.append(0.0)
for i in range(1, d + 1):
if q > 1:
phi_out[i - 1] -= binom(d, i) * (-1) ** i * np.eye(q)
else:
phi_out[i - 1] -= binom(d, i) * (-1) ** i
for i in range(1, p + 1):
phi_out[i - 1] += phi[i - 1]
for i in range(1, p + 1):
for j in range(1, d + 1):
phi_out[i + j - 1] += phi[i - 1] * binom(d, j) * (-1) ** j
return phi_out
|
pySTEPS/pysteps
|
pysteps/timeseries/autoregression.py
|
Python
|
bsd-3-clause
| 39,864
|
[
"Gaussian"
] |
d76817941f538da74503e5d3cb8c90cd3f659154346b4abead766631477c786f
|
#
# Copyright 2014-2017 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
import re
import sys
import os
import csv
import random
import tempfile
import numpy as np
from numpy import array, float32, int32, empty, newaxis, dot, cross, zeros, ones
from numpy.linalg import norm
import json
import math
def Gsigma(sigma):
"""Pickle a gaussian function G(x) for given sigma"""
def G(x):
return (math.e ** (-(x**2)/(2*sigma**2)))/(2 * math.pi* sigma**2)**0.5
return G
def gaussian_kernel(s):
G = Gsigma(s) # G(x) gaussian function
kernel_width = 2 * (int(6.0 * s - 1) // 2) + 1 # force always odd
kernel_radius = (kernel_width - 1) // 2 # doesn't include central cell
kernel = list(map(G, list(range(-kernel_radius, kernel_radius+1))))
mag = sum(kernel)
kernel = [x / mag for x in kernel]
return kernel
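# Worked example (illustrative): gaussian_kernel(1.0) yields a 5-tap 1D kernel
# (width 2*(int(6*s - 1)//2) + 1) whose weights are normalized to sum to 1.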
def crop_centered(orig, newshape):
return orig[tuple(
diff and slice(diff//2, -(diff//2)) or slice(None)
for diff in map(lambda l, s: l-s, orig.shape, newshape)
)]
def pad_centered(orig, newshape, pad=0):
assert len(newshape) == orig.ndim
for d in range(len(newshape)):
assert newshape[d] >= orig.shape[d]
na = zeros(newshape, dtype=orig.dtype)
def helper1d(dstlen, srclen):
if dstlen > srclen:
return slice((dstlen-srclen)//2, -(dstlen-srclen)//2)
else:
return None
na[ tuple( map(helper1d, na.shape, orig.shape) ) ] = orig
return na
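# Worked example (illustrative): pad_centered(np.ones((2, 2)), (4, 4)) embeds
# the 2x2 block of ones at the center of a 4x4 zero array.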
def compose_3d_kernel(klist):
zk, yk, xk = klist
def mult(a, b):
return array(
[ a[i] * array(b) for i in range(len(a)) ],
dtype=float32
)
result = mult(zk, mult(yk, xk))
return result
def clamp_center_edge(orig, axis=0):
return orig * (orig >= orig[
tuple(
orig.shape[d]//2 for d in range(axis)
) + (0,) + tuple(
orig.shape[d]//2 for d in range(axis+1, 3)
)
])
def numstr(x):
s = "%f" % x
m = re.match("""^(?P<whole>[0-9]*)[.](?P<frac>(?:[0-9]*[1-9])?)(?P<trail>0*)$""", s)
g = m.groupdict()
if g['frac']:
return "%(whole)s.%(frac)s" % g
else:
return "%(whole)s" % g
def kernel_diameters(s):
    """Return a 3-tuple from JSON string X or array [Z, Y, X].
       If a single value X is provided, it is expanded to (2*X, X, X).
    """
    v = json.loads(s)
    if isinstance(v, list):
        assert len(v) == 3
        return tuple(map(float, v))
    else:
        v = float(v)
        return (2*v, v, v)
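# Worked examples (illustrative): kernel_diameters("2.0") -> (4.0, 2.0, 2.0)
# and kernel_diameters("[3, 2, 2]") -> (3.0, 2.0, 2.0).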
def prepare_kernels(gridsize, synapse_diam_microns, vicinity_diam_microns, redblur_microns):
"""Prepare synapse-detection convolution kernels.
Parameters:
gridsize: the micron step size of the image in (Z, Y, X) axes
synapse_diam_microns: core synapse feature span
vicinity_diam_microns: synapse local background span
       redblur_microns: auto-fluorescence blurring span
All span arguments are 3-tuples of micron lengths of the
standard-deviation of the related Gaussian distribution in each
image dimension (Z, Y, X).
       Result is a 2-tuple:
          ( kernels_3x1d, kernels_3d ).
       The kernels_3x1d result is a 4-tuple:
          ( low_3x1d, span_3x1d, syn_3x1d, vlow_3x1d )
       where each kernel is a 3-tuple of 1D kernels, one per image
       dimension (Z, Y, X). The gaussian kernels hold float weights
       summing to 1.0 while the span_3x1d kernel is a binary mask.
       The kernels_3d result is a 4-tuple
          ( core_3d, hollow_3d, red_3d, max_3d )
       where each field is a numpy array of 3D kernel weights. The
       core and hollow kernels are normalized to sum to 1.0.
"""
try:
peak_factor = float(os.getenv('PEAKS_DIAM_FACTOR', 0.75))
except ValueError as te:
print('ERROR: invalid PEAKS_DIAM_FACTOR environment value')
raise
# these are separated 1d gaussian kernels
syn_kernels = list(map(lambda d, s: gaussian_kernel(d/s/6.), synapse_diam_microns, gridsize))
low_kernels = list(map(lambda d, s: gaussian_kernel(peak_factor*d/s/6.), synapse_diam_microns, gridsize))
vlow_kernels = list(map(lambda d, s: gaussian_kernel(d/s/6.), vicinity_diam_microns, gridsize))
span_kernels = list(map(lambda d, s: (1,) * (2*(int(d/s)//2)+1), vicinity_diam_microns, gridsize))
# TODO: investigate variants?
# adjust diameter by a fudge factor?
core_kernel = compose_3d_kernel(syn_kernels)
span_kernel = compose_3d_kernel(vlow_kernels)
if True:
# truncate to ellipsoid region
core_kernel = clamp_center_edge(core_kernel)
span_kernel = clamp_center_edge(span_kernel)
hollow_kernel = span_kernel * (pad_centered(core_kernel, span_kernel.shape) <= 0)
max_kernel = ones(list(map(lambda d, s: 2*(int(0.7*d/s)//2)+1, synapse_diam_microns, gridsize)), dtype=float32)
# sanity check kernel shapes
for d in range(3):
if len(syn_kernels[d]) <= 1:
raise ValueError(
'Synapse diameter %f and gridsize %f result in undersized synapse kernel!'
% (synapse_diam_microns[d], gridsize[d])
)
if len(low_kernels[d]) <= 1:
raise ValueError(
'Synapse diameter %f, peak_diam_factor %f, and gridsize %f result in undersized low-pass kernel!'
% (synapse_diam_microns[d], peak_factor, gridsize[d])
)
if hollow_kernel.shape[d] - core_kernel.shape[d] <= 1:
raise ValueError(
'Synapse diameter %f, vicinity diameter %f, and gridsize %f result in undersized hollow span!'
                % (synapse_diam_microns[d], vicinity_diam_microns[d], gridsize[d])
)
core_kernel /= core_kernel.sum()
hollow_kernel /= hollow_kernel.sum()
red_kernel = compose_3d_kernel(
list(map(lambda d, s: gaussian_kernel(d/s/6.), redblur_microns, gridsize))
)
return (
(low_kernels, span_kernels, syn_kernels, vlow_kernels),
(core_kernel, hollow_kernel, red_kernel, max_kernel)
)
def radii_3x1d(k3x1d):
return np.array([len(k1d)//2 for k1d in k3x1d], dtype=np.int32)
def radii_3d(k3d):
return np.array([d//2 for d in k3d.shape], dtype=np.int32)
def centroids_zx_swap(centroids):
"""Return a copy of centroids array with Z and X swapped, e.g. ZYX<->XYZ."""
copy = np.zeros(centroids.shape, dtype=centroids.dtype)
copy[:,0] = centroids[:,2]
copy[:,1] = centroids[:,1]
copy[:,2] = centroids[:,0]
return copy
def transform_points(M, v, dtype=np.float32):
"""Apply transform matrix to k XYZ points in v[k,3] array.
This does a standard np.matmul(v, M) after extending all points
in v with the W coordinate 1.0, and renormalizing the results
back to XYZ w/o W coordinate.
"""
assert v.shape[1] == 3
a1 = np.ones((v.shape[0], 4), dtype=np.float64)
a1[:,0:3] = v
a2 = np.matmul(a1,M)
return (a2[:,0:3] / a2[:,3,None]).astype(dtype)
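# Illustrative sketch: a pure translation in the row-vector convention used by
# np.matmul(v, M), i.e. the offset sits in the fourth row of M.
# >>> M = np.eye(4)
# >>> M[3, 0:3] = [1.0, 2.0, 3.0]
# >>> transform_points(M, np.zeros((1, 3)))   # -> [[1., 2., 3.]]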
def transform_centroids(M, centroids):
"""Transform each ZYX point in centroids[k,3] using np.dot(M, xyzw) intermediate representation.
Note, the 4x4 transform M may need to be transposed for
np.dot(M, xyzw) to give you the transform you expect!
"""
assert centroids.shape[1] == 3
a1 = np.zeros((centroids.shape[0], 4), dtype=np.float64)
a2 = np.zeros(centroids.shape, dtype=np.float64)
a1[:,0:3] = centroids_zx_swap(centroids)
a1[:,3] = 1.0
for i in range(a1.shape[0]):
p = np.dot(M, a1[i])
a2[i,:] = p[0:3] / p[3]
return centroids_zx_swap(a2).astype(np.float32)
def load_segment_info_from_csv(infilename, zyx_grid_scale=None, zx_swap=False, filter_status=None):
"""Load a segment list and return content as arrays.
"""
csvfile = open(infilename, 'r')
reader = csv.DictReader(csvfile)
centroids = []
measures = []
status = []
saved_params = None
for row in reader:
# newer dump files have an extra saved-parameters row first...
if row['Z'] == 'saved' and row['Y'] == 'parameters':
saved_params = row
continue
centroids.append(
(int(row['Z']), int(row['Y']), int(row['X']))
)
measures.append(
(float(row['raw core']), float(row['raw hollow']), float(row['DoG core']), float(row['DoG hollow']))
+ ((float(row['red']),) if 'red' in row else ())
)
status.append(
int(row['override']) if row['override'] else 0
)
centroids = np.array(centroids, dtype=np.int32)
measures = np.array(measures, dtype=np.float32)
status = np.array(status, dtype=np.uint8)
if zyx_grid_scale is not None:
zyx_grid_scale = np.array(zyx_grid_scale, dtype=np.float32)
assert zyx_grid_scale.shape == (3,)
centroids = (centroids * zyx_grid_scale).astype(np.float32)
if filter_status is not None:
        filter_idx = np.zeros(status.shape, dtype=bool)
for value in filter_status:
filter_idx += (status == value)
centroids = centroids[filter_idx]
measures = measures[filter_idx]
status = status[filter_idx]
return (
centroids_zx_swap(centroids) if zx_swap else centroids,
measures,
status,
saved_params
)
def load_segment_status_from_csv(centroids, offset_origin, infilename):
"""Load a segment list with manual override status values validating against expected centroid list.
Arguments:
centroids: Nx3 array of Z,Y,X segment coordinates
offset_origin: CSV coordinates = offset_origin + centroid coordinates
infilename: file to open to read CSV content
Returns tuple with:
status array (1D),
saved params dict or None
"""
csv_centroids, csv_measures, csv_status, saved_params = load_segment_info_from_csv(infilename)
if csv_centroids.shape[0] > 0:
csv_centroids -= np.array(offset_origin, dtype=np.int32)
return dense_segment_status(centroids, csv_centroids, csv_status), saved_params
def dense_segment_status(centroids, sparse_centroids, sparse_status):
"""Construct dense segment status from sparse info, e.g. previously loaded from CSV."""
# assume that dump is ordered subset of current analysis
status = np.zeros((centroids.shape[0],), dtype=np.uint8)
i = 0
for row in range(sparse_centroids.shape[0]):
# scan forward until we find same centroid in sparse subset
while i < centroids.shape[0] and tuple(sparse_centroids[row]) != tuple(centroids[i]):
i += 1
if i >= centroids.shape[0]:
raise ValueError("Sparse dump does not match image analysis!", sparse_centroids[row])
if sparse_status[row]:
status[i] = sparse_status[row]
return status
def dump_segment_info_to_csv(centroids, measures, status, offset_origin, outfilename, saved_params=None, all_segments=True, zx_swap=False, zyx_grid_scale=None, filter_status=None):
"""Load a segment list with manual override status values validating against expected centroid list.
Arguments:
centroids: Nx3 array of Z,Y,X segment coordinates
measures: NxK array of segment measures
status: N array of segment status
offset_origin: CSV coordinates = offset_origin + centroid coordinates
outfilename: file to open to write CSV content
saved_params: dict or None if saving threshold params row
all_segments: True: dump all, False: dump only when matching filter_status values
zx_swap: True: input centroids are in X,Y,Z order
zyx_grid_scale: input centroids have been scaled by these coefficients in Z,Y,X order
filter_status: set of values to include in outputs or None implies all non-zero values
"""
if zx_swap:
centroids = centroids_zx_swap(centroids)
if zyx_grid_scale is not None:
zyx_grid_scale = np.array(zyx_grid_scale, dtype=np.float32)
assert zyx_grid_scale.shape == (3,)
centroids = centroids * zyx_grid_scale
# correct dumped centroids to global coordinate space of unsliced source image
centroids = centroids + np.array(offset_origin, np.int32)
csvfile = open(outfilename, 'w', newline='')
writer = csv.writer(csvfile)
writer.writerow(
('Z', 'Y', 'X', 'raw core', 'raw hollow', 'DoG core', 'DoG hollow')
+ (('red',) if (measures.shape[1] == 5) else ())
+ ('override',)
)
if saved_params:
writer.writerow(
(
'saved',
'parameters',
saved_params.get('X', ''),
saved_params.get('raw core', ''),
saved_params.get('raw hollow', ''),
saved_params.get('DoG core', ''),
saved_params.get('DoG hollow', ''),
)
+ ((saved_params.get('red', ''),) if 'red' in saved_params else ())
+ (saved_params.get('override', ''),)
)
filter_idx = np.zeros(status.shape, dtype=bool)
if all_segments:
filter_idx |= True
elif filter_status is not None:
for value in filter_status:
filter_idx |= (status == value)
else:
filter_idx |= (status > 0)
indices = filter_idx.nonzero()[0]
for i in indices:
Z, Y, X = centroids[i]
writer.writerow(
(Z, Y, X) + tuple(measures[i,m] for m in range(measures.shape[1])) + (status[i] or '',)
)
del writer
csvfile.close()
def load_registered_csv(hatrac_store, object_path):
"""Load a registered segment list from the object store.
Arguments:
hatrac_store: an instance of deriva.core.HatracStore
object_path: the path of the registered segment list CSV object in the store
Returns:
a: an array of shape (N, k) of type float32
The array has N centroids and k values packed as:
Z, Y, X, raw core, raw hollow, DoG core, DoG hollow (, red)?, override
"""
r = hatrac_store.get(object_path, stream=True)
r.raise_for_status()
reader = csv.DictReader(r.iter_lines(decode_unicode=True, chunk_size=1024**2))
rows = []
for row in reader:
if row['Z'] == 'saved':
continue
rows.append(
tuple([
row[k]
for k in ['Z', 'Y', 'X', 'raw core', 'raw hollow', 'DoG core', 'DoG hollow', 'red', 'override']
if k in row
])
)
rows = np.array(rows, dtype=np.float32)
return rows
def get_hatrac_object_cached(hatrac_store, object_path, cache_dir, suffix=''):
resp = hatrac_store.head(object_path)
md5 = resp.headers['content-md5']
if cache_dir is not None:
fname = '%s/%s%s' % (cache_dir, md5.replace('/', '_'), suffix)
if os.path.isfile(fname):
return fname
else:
fd, fname = tempfile.mkstemp(suffix=suffix)
os.close(fd)
hatrac_store.get_obj(object_path, destfilename=fname)
return fname
def load_registered_npz(hatrac_store, object_path, alignment=None, csv_object_path=None, cache_dir=None):
"""Load registered segment list from the object store.
Arguments:
hatrac_store: from where to fetch object content
object_path: specific NPZ object within hatrac_store
alignment: 4x4 matrix to transform micron-spaced coordinates (or None)
csv_object_path: specific CSV object within hatrac_store (or None)
cache_dir: name of local directory to use as download cache for object reuse
If csv_object_path is specified, interpret it as a sparse
subset of the same pointcloud and merge its status field into
result.
Returns:
a: an array of shape (N, k) of type float32
The array has N centroids and k values packed as:
Z, Y, X, raw core, raw hollow, DoG core, DoG hollow (, red)?, override
"""
if alignment is not None:
alignment = np.array(alignment, np.float64)
else:
alignment = np.eye(4, dtype=np.float64)
fname = None
fnamec = None
try:
fname = get_hatrac_object_cached(hatrac_store, object_path, cache_dir, '.npz')
if csv_object_path:
fnamec = get_hatrac_object_cached(hatrac_store, csv_object_path, cache_dir, '.csv')
csv_centroids, csv_measures, csv_status, csv_saved_params = load_segment_info_from_csv(fnamec)
with np.load(fname) as parts:
properties = json.loads(parts['properties'].tobytes().decode('utf8'))
measures = parts['measures'].astype(np.float32) * np.float32(properties['measures_divisor'])
slice_origin = np.array(properties['slice_origin'], dtype=np.int32)
centroids = parts['centroids'].astype(np.int32) + slice_origin
image_grid = np.array(properties['image_grid'], dtype=np.float32)
result = np.zeros((centroids.shape[0], 3 + measures.shape[1] + 1), np.float32)
result[:,0:3] = transform_centroids(alignment, centroids * image_grid)
result[:,3:-1] = measures[:,:]
if csv_object_path:
result[:,-1] = dense_segment_status(centroids, csv_centroids, csv_status)
return result
finally:
if cache_dir is None:
if csv_object_path and fnamec:
os.unlink(fnamec)
if fname:
os.unlink(fname)
def matrix_ident():
"""Produce indentity transform."""
return np.identity(4).astype(np.float64)
def matrix_translate(displacement_xyz):
"""Produce translation transform matrix."""
M = matrix_ident()
M[3,0:3] = displacement_xyz
return M
def matrix_scale(s):
"""Produce scaling transform matrix with uniform scale s in all 3 dimensions."""
M = matrix_ident()
M[0:3,0:3] = np.diag([ s, s, s ]).astype(np.float64)
return M
def matrix_rotate(axis, radians):
"""Produce rotation transform matrix about axis through origin."""
s = math.sin(radians)
c = math.cos(radians)
C = 1 - c
x, y, z = axis / np.linalg.norm(axis)
R = np.array(
[
[ x*x*C + c, x*y*C - z*s, x*z*C + y*s ],
[ y*x*C + z*s, y*y*C + c, y*z*C - x*s ],
[ z*x*C - y*s, z*y*C + x*s, z*z*C + c ],
],
dtype=np.float64
)
M = matrix_ident()
M[0:3,0:3] = R
return M
x_axis = np.array([1,0,0], np.float64)
y_axis = np.array([0,1,0], np.float64)
z_axis = np.array([0,0,1], np.float64)
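# Composition sketch: with the row-vector convention of transform_points(),
# scale, then rotate, then translate composes left-to-right:
#   M = matrix_scale(0.5) @ matrix_rotate(z_axis, math.pi / 2) @ matrix_translate([1.0, 0.0, 0.0])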
|
informatics-isi-edu/synspy
|
synspy/analyze/util.py
|
Python
|
bsd-3-clause
| 18,746
|
[
"Gaussian"
] |
fb90f385a9f375df2c67e57e04b07989436d0ea4267a8a0214b52d50c3214736
|
#!/usr/bin/env python
#
# Copyright (c) 2012 OpenDNS, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the OpenDNS nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OPENDNS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Class to implement draft-ietf-dnsop-edns-client-subnet (previously known as
draft-vandergaast-edns-client-subnet.
The contained class supports both IPv4 and IPv6 addresses.
Requirements:
dnspython (http://www.dnspython.org/)
"""
from __future__ import print_function
from __future__ import division
import socket
import struct
import dns
import dns.edns
import dns.flags
import dns.message
import dns.query
__author__ = "bhartvigsen@opendns.com (Brian Hartvigsen)"
__version__ = "2.0.0"
ASSIGNED_OPTION_CODE = 0x0008
DRAFT_OPTION_CODE = 0x50FA
FAMILY_IPV4 = 1
FAMILY_IPV6 = 2
SUPPORTED_FAMILIES = (FAMILY_IPV4, FAMILY_IPV6)
class ClientSubnetOption(dns.edns.Option):
"""Implementation of draft-vandergaast-edns-client-subnet-01.
Attributes:
family: An integer indicating which address family is being sent
ip: IP address in integer notation
mask: An integer representing the number of relevant bits being sent
scope: An integer representing the number of significant bits used by
the authoritative server.
"""
def __init__(self, ip, bits=24, scope=0, option=ASSIGNED_OPTION_CODE):
super(ClientSubnetOption, self).__init__(option)
n = None
f = None
for family in (socket.AF_INET, socket.AF_INET6):
try:
n = socket.inet_pton(family, ip)
if family == socket.AF_INET6:
f = FAMILY_IPV6
hi, lo = struct.unpack('!QQ', n)
ip = hi << 64 | lo
elif family == socket.AF_INET:
f = FAMILY_IPV4
ip = struct.unpack('!L', n)[0]
except Exception:
pass
if n is None:
raise Exception("%s is an invalid ip" % ip)
self.family = f
self.ip = ip
self.mask = bits
self.scope = scope
self.option = option
if self.family == FAMILY_IPV4 and self.mask > 32:
raise Exception("32 bits is the max for IPv4 (%d)" % bits)
if self.family == FAMILY_IPV6 and self.mask > 128:
raise Exception("128 bits is the max for IPv6 (%d)" % bits)
def calculate_ip(self):
"""Calculates the relevant ip address based on the network mask.
Calculates the relevant bits of the IP address based on network mask.
Sizes up to the nearest octet for use with wire format.
Returns:
An integer of only the significant bits sized up to the nearest
octet.
"""
if self.family == FAMILY_IPV4:
bits = 32
elif self.family == FAMILY_IPV6:
bits = 128
ip = self.ip >> bits - self.mask
if (self.mask % 8 != 0):
ip = ip << 8 - (self.mask % 8)
return ip
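# Worked example: for IPv4 address 192.0.2.1 (0xC0000201) with mask=24,
# self.ip >> (32 - 24) keeps the top 24 bits, 0xC00002; 24 is a whole
# number of octets, so no further shift is applied. For mask=20 the value
# is shifted left by 8 - (20 % 8) = 4 bits to fill 3 whole octets.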
def is_draft(self):
"""" Determines whether this instance is using the draft option code """
return self.option == DRAFT_OPTION_CODE
def to_wire(self, file):
"""Create EDNS packet as definied in draft-vandergaast-edns-client-subnet-01."""
ip = self.calculate_ip()
mask_bits = self.mask
if mask_bits % 8 != 0:
mask_bits += 8 - (self.mask % 8)
if self.family == FAMILY_IPV4:
test = struct.pack("!L", ip)
elif self.family == FAMILY_IPV6:
test = struct.pack("!QQ", ip >> 64, ip & (2 ** 64 - 1))
test = test[-(mask_bits // 8):]
format = "!HBB%ds" % (mask_bits // 8)
data = struct.pack(format, self.family, self.mask, 0, test)
file.write(data)
def from_wire(cls, otype, wire, current, olen):
"""Read EDNS packet as defined in draft-vandergaast-edns-client-subnet-01.
Returns:
An instance of ClientSubnetOption based on the EDNS packet
"""
data = wire[current:current + olen]
(family, mask, scope) = struct.unpack("!HBB", data[:4])
c_mask = mask
if mask % 8 != 0:
c_mask += 8 - (mask % 8)
ip = struct.unpack_from("!%ds" % (c_mask // 8), data, 4)[0]
if (family == FAMILY_IPV4):
ip = ip + b'\0' * ((32 - c_mask) // 8)
ip = socket.inet_ntop(socket.AF_INET, ip)
elif (family == FAMILY_IPV6):
ip = ip + b'\0' * ((128 - c_mask) // 8)
ip = socket.inet_ntop(socket.AF_INET6, ip)
else:
raise Exception("Returned a family other then IPv4 or IPv6")
return cls(ip, mask, scope, otype)
from_wire = classmethod(from_wire)
def __repr__(self):
if self.family == FAMILY_IPV4:
ip = socket.inet_ntop(socket.AF_INET, struct.pack('!L', self.ip))
elif self.family == FAMILY_IPV6:
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('!QQ',
self.ip >> 64,
self.ip & (2 ** 64 - 1)))
return "%s(%s, %s, %s)" % (
self.__class__.__name__,
ip,
self.mask,
self.scope
)
def __eq__(self, other):
"""Rich comparison method for equality.
Two ClientSubnetOptions are equal if their relevant ip bits, mask, and
family are identical. We ignore scope since generally we want to
compare questions to responses and that bit is only relevant when
determining caching behavior.
Returns:
boolean
"""
if not isinstance(other, ClientSubnetOption):
return False
if self.calculate_ip() != other.calculate_ip():
return False
if self.mask != other.mask:
return False
if self.family != other.family:
return False
return True
def __ne__(self, other):
"""Rich comparison method for inequality.
See notes for __eq__()
Returns:
boolean
"""
return not self.__eq__(other)
dns.edns._type_to_class[DRAFT_OPTION_CODE] = ClientSubnetOption
dns.edns._type_to_class[ASSIGNED_OPTION_CODE] = ClientSubnetOption
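# Minimal usage sketch (placeholder resolver address and query name):
#   cso = ClientSubnetOption('198.51.100.0', 24)
#   q = dns.message.make_query('www.example.com', 'A')
#   q.use_edns(options=[cso])
#   r = dns.query.udp(q, '192.0.2.53', timeout=5)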
if __name__ == "__main__":
import argparse
import sys
def CheckForClientSubnetOption(addr, args, option_code=ASSIGNED_OPTION_CODE):
print("Testing for edns-clientsubnet using option code", hex(option_code), file=sys.stderr)
cso = ClientSubnetOption(args.subnet, args.mask, option=option_code)
message = dns.message.make_query(args.rr, args.type)
# Tested authoritative servers seem to use the last code in cases
# where they support both. We make the official code last to allow
# us to check for support of both draft and official
message.use_edns(options=[cso])
try:
r = dns.query.udp(message, addr, timeout=args.timeout)
if r.flags & dns.flags.TC:
r = dns.query.tcp(message, addr, timeout=args.timeout)
except dns.exception.Timeout:
print("Timeout: No answer received from %s\n" % args.nameserver, file=sys.stderr)
sys.exit(3)
error = False
found = False
for options in r.options:
# Have not run into anyone who passes back both codes yet
# but just in case, we want to check all possible options
if isinstance(options, ClientSubnetOption):
found = True
print("Found ClientSubnetOption...", end=None, file=sys.stderr)
if not cso.family == options.family:
error = True
print("\nFailed: returned family (%d) is different from the passed family (%d)" % (options.family, cso.family), file=sys.stderr)
if not cso.calculate_ip() == options.calculate_ip():
error = True
print("\nFailed: returned ip (%s) is different from the passed ip (%s)." % (options.calculate_ip(), cso.calculate_ip()), file=sys.stderr)
if not options.mask == cso.mask:
error = True
print("\nFailed: returned mask bits (%d) is different from the passed mask bits (%d)" % (options.mask, cso.mask), file=sys.stderr)
if options.scope == 0:
print("\nWarning: scope indicates edns-clientsubnet data is not used", file=sys.stderr)
if options.is_draft():
print("\nWarning: detected support for edns-clientsubnet draft code", file=sys.stderr)
if found and not error:
print("Success", file=sys.stderr)
elif found:
print("Failed: See error messages above", file=sys.stderr)
else:
print("Failed: No ClientSubnetOption returned", file=sys.stderr)
parser = argparse.ArgumentParser(description='draft-vandergaast-edns-client-subnet-01 tester')
parser.add_argument('nameserver', help='The nameserver to test')
parser.add_argument('rr', help='DNS record that should return an EDNS enabled response')
parser.add_argument('-s', '--subnet', help='Specifies an IP to pass as the client subnet.', default='192.0.2.0')
parser.add_argument('-m', '--mask', type=int, help='CIDR mask to use for subnet')
parser.add_argument('--timeout', type=int, help='Set the timeout for query to TIMEOUT seconds, default=10', default=10)
parser.add_argument('-t', '--type', help='DNS query type, default=A', default='A')
args = parser.parse_args()
if not args.mask:
if ':' in args.subnet:
args.mask = 48
else:
args.mask = 24
try:
addr = socket.gethostbyname(args.nameserver)
except socket.gaierror:
print("Unable to resolve %s\n" % args.nameserver, file=sys.stderr)
sys.exit(3)
CheckForClientSubnetOption(addr, args, DRAFT_OPTION_CODE)
print("", file=sys.stderr)
CheckForClientSubnetOption(addr, args, ASSIGNED_OPTION_CODE)
|
hlindqvist/pdns
|
regression-tests.dnsdist/clientsubnetoption.py
|
Python
|
gpl-2.0
| 11,549
|
[
"Brian"
] |
31b073ace28955d44c7dd59b92ed48778a9f93a8afe5c5872ef88c2235b54384
|
from sequana import sequana_data
from multiqc.utils import report
try:
# Since Sept 2017 and a bioconda travis integration update,
# this test fails due to an error in spectra/colormath:
# self.conversion_graph.add_edge(start_type, target_type,
# {'conversion_function': conversion_function})
#E TypeError: add_edge() takes 3 positional arguments but 4 were given
from sequana.multiqc import pacbio_qc, quality_control, coverage
from sequana.multiqc import bamtools_stats, kraken
def test_pacbio():
# When calling multiqc on the command line, it scans the directory
# to identify the files to include in the singleton "report";
# Here, because we do not use the standalone app, report.files is empty,
# so we populate it by hand. Moreover, the paths are altered to look for
# files in the sequana/resources/testing directory instead of the local
# directory. Because we populate report.files ourselves, we can use
# whatever name we like, except that the MultiqcModule expects a specific key name
report.files = {"sequana_pacbio_qc":
[{'filesize': 5913, 'fn': sequana_data('summary_pacbio_qc1.json'), 'root': '.'},
{'filesize': 5731, 'fn': sequana_data('summary_pacbio_qc2.json'), 'root': '.'},
{'filesize': 5820, 'fn': sequana_data('summary_pacbio_qc3.json'), 'root': '.'}]
}
pacbio_qc.MultiqcModule()
def test_quality_control():
report.files = {"sequana_quality_control":
[ { 'fn': sequana_data('summary_qc.json'), 'root': '.'}]
}
quality_control.MultiqcModule()
def test_coverage():
report.files = {"sequana_coverage":
[ { 'fn': sequana_data('summary_coverage1.json'), 'root': '.'},
{ 'fn': sequana_data('summary_coverage1.json'), 'root': '.'}]
}
coverage.MultiqcModule()
def test_sequana_bamtools():
report.files = {"sequana_bamtools_stats":
[ { 'fn': sequana_data('summary_bamtools_stats.txt'), 'root': '.'},
{ 'fn': sequana_data('summary_bamtools_stats.txt'), 'root': '.'}]
}
bamtools_stats.MultiqcModule()
def test_kraken():
report.files = {"sequana_kraken":
[ { 'fn': sequana_data('summary_kraken.json'), 'root': '.'},
]
}
kraken.MultiqcModule()
except TypeError:
pass
except:
raise IOError
|
sequana/sequana
|
test/multiqc/test_multiqc.py
|
Python
|
bsd-3-clause
| 2,465
|
[
"Bioconda"
] |
f5b40d62351260ded1c24ff31ddc42358b86e97186c17e0fbc5af90d5ba89e72
|
#!/usr/bin/env python
from __future__ import print_function
import pysam
import numpy as np
import argparse
import sys
ap = argparse.ArgumentParser(description="Compute expected insert size")
ap.add_argument("bam", help="Input bam file.")
ap.add_argument("-n", help="Num samples", type=int, default=100000)
ap.add_argument("-M", help="Max isize", type=int, default=10000)
ap.add_argument("-m", help="Max isize", type=int, default=100)
ap.add_argument("--all", help="Print all to this file", default=None)
args = ap.parse_args()
bamFile = pysam.Samfile(args.bam, 'rb')
if (args.all is not None):
allFile = open(args.all, 'w')
reads = {}
nUsed = 0
spans = []
nproc =0
for aln in bamFile.fetch():
if (aln.qname not in reads):
if (np.random.randint(0,50) < 1 and abs(aln.isize) < args.M and abs(aln.isize) > args.m ):
spans.append(abs(aln.isize))
reads[aln.qname]=True
if (args.all is not None):
allFile.write(str(aln.isize) + "\n")
if (len(spans) >= args.n):
break
nproc += 1
if (nproc % 10000 == 0):
sys.stderr.write(str(nproc) + "\t" + str(len(spans)) + "\n")
npspans = np.asarray(spans)
npspans.sort()
print "Num: " + str(len(npspans))
print "Median: " + str(npspans[len(npspans)/2])
print "Mean: " + str(np.mean(npspans))
print "SD: " + str(np.std(npspans))
print "max: " + str(np.max(npspans))
print "max: " + str(np.max(spans))
|
yunlongliukm/chm1_scripts
|
ComputeCloneSize.py
|
Python
|
mit
| 1,403
|
[
"pysam"
] |
d52516c49ccc782a866af97323a76e882769a7165ff4268bdc10af97e4a0a0ee
|
#!/usr/bin/env python
#
# $File: PyExec.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
simu = sim.Simulator(sim.Population(100, loci=1),
rep=2)
simu.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[0.2, 0.8]),
sim.PyExec('traj=[]')
],
matingScheme=sim.RandomMating(),
postOps=[
sim.Stat(alleleFreq=0),
sim.PyExec('traj.append(alleleFreq[0][1])'),
],
gen=5
)
# print Trajectory
print(', '.join(['%.3f' % x for x in simu.dvars(0).traj]))
|
BoPeng/simuPOP
|
docs/PyExec.py
|
Python
|
gpl-2.0
| 1,527
|
[
"VisIt"
] |
57680249b223d8a744ac5a9bb19bdde8be245acceb92eb0a43a78e6f032aaaaa
|
from __future__ import print_function
import pysam
def data_points(file_path, start, stop, chrm, step, size):
start, stop, step, size = int(start), int(stop), int(step), int(size)
points = []
w_size = (stop - start) // size
print("compute(): w_size = %s" % w_size)
tabixfile = pysam.Tabixfile(file_path)
i_start = start
for i in range(size):
w_vals = []
for gtf in tabixfile.fetch(chrm, i_start, i_start+w_size):
_chrm, _start, _end, _val = gtf.split()
_start, _end, _val = int(_start), int(_end), int(_val)
w_vals.append(_val)
if len(w_vals) > 0:
points.append(sum(w_vals) // len(w_vals))
else:
points.append(0)
i_start = i_start + w_size
print "compute(): len(points) = %s" % len(points)
return points[0:size]
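# Usage sketch (hypothetical file and coordinates), assuming a bgzipped,
# tabix-indexed 4-column (chrom, start, end, value) file; note the step
# argument is accepted but unused by this implementation:
#   points = data_points('scores.bed.gz', 1000, 101000, 'chr1', 1, 100)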
|
drio/bedserver
|
bedserver/compute.py
|
Python
|
mit
| 807
|
[
"pysam"
] |
ec232e766ca971fee1a8c6adc4900bb25b8d342dde15664d583f84fe6c060889
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains an algorithm to solve the Linear Assignment Problem.
It has the same functionality as linear_assignment.pyx, but is much slower
as it is vectorized in numpy rather than cython
"""
__author__ = "Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Will Richards"
__email__ = "wrichards@mit.edu"
__date__ = "Jan 28, 2013"
import numpy as np
class LinearAssignment:
"""
This class finds the solution to the Linear Assignment Problem.
It finds a minimum cost matching between two sets, given a cost
matrix.
This class is an implementation of the LAPJV algorithm described in:
R. Jonker, A. Volgenant. A Shortest Augmenting Path Algorithm for
Dense and Sparse Linear Assignment Problems. Computing 38, 325-340
(1987)
Args:
costs: The cost matrix of the problem. cost[i,j] should be the
cost of matching x[i] to y[j]. The cost matrix may be
rectangular
epsilon: Tolerance for determining if solution vector is < 0
.. attribute: min_cost:
The minimum cost of the matching
.. attribute: solution:
The matching of the rows to columns. i.e solution = [1, 2, 0]
would match row 0 to column 1, row 1 to column 2 and row 2
to column 0. Total cost would be c[0, 1] + c[1, 2] + c[2, 0]
"""
def __init__(self, costs, epsilon=1e-6):
self.orig_c = np.array(costs, dtype=np.float64)
self.nx, self.ny = self.orig_c.shape
self.n = self.ny
self._inds = np.arange(self.n)
self.epsilon = abs(epsilon)
# check that the cost matrix has at least as many columns as rows
if self.nx > self.ny:
raise ValueError("cost matrix must have at least as many columns as rows")
if self.nx == self.ny:
self.c = self.orig_c
else:
# Can run into precision issues if np.max is used as the fill value (since a
# value of this size doesn't necessarily end up in the solution). A value
# at least as large as the maximin is, however, guaranteed to appear so it
# is a safer choice. The fill value is not zero to avoid choosing the extra
# rows in the initial column reduction step
self.c = np.full((self.n, self.n), np.max(np.min(self.orig_c, axis=1)))
self.c[:self.nx] = self.orig_c
# initialize solution vectors
self._x = np.zeros(self.n, dtype=int) - 1
self._y = self._x.copy()
# if column reduction doesn't find a solution, augment with shortest
# paths until one is found
if self._column_reduction():
self._augmenting_row_reduction()
# initialize the reduced costs
self._update_cred()
while -1 in self._x:
self._augment()
self.solution = self._x[:self.nx]
self._min_cost = None
@property
def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost is not None:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost
def _column_reduction(self):
"""
Column reduction and reduction transfer steps from LAPJV algorithm
"""
# assign each column to its lowest cost row, ensuring that only row
# or column is assigned once
i1, j = np.unique(np.argmin(self.c, axis=0), return_index=True)
self._x[i1] = j
# if problem is solved, return
if len(i1) == self.n:
return False
self._y[j] = i1
# reduction_transfer
# tempc is array with previously assigned matchings masked
self._v = np.min(self.c, axis=0)
tempc = self.c.copy()
tempc[i1, j] = np.inf
mu = np.min(tempc[i1, :] - self._v[None, :], axis=1)
self._v[j] -= mu
return True
def _augmenting_row_reduction(self):
"""
Augmenting row reduction step from LAPJV algorithm
"""
unassigned = np.where(self._x == -1)[0]
for i in unassigned:
for _ in range(self.c.size):
# Time in this loop can be proportional to 1/epsilon
# This step is not strictly necessary, so cutoff early
# to avoid near-infinite loops
# find smallest 2 values and indices
temp = self.c[i] - self._v
j1 = np.argmin(temp)
u1 = temp[j1]
temp[j1] = np.inf
j2 = np.argmin(temp)
u2 = temp[j2]
if u1 < u2:
self._v[j1] -= u2 - u1
elif self._y[j1] != -1:
j1 = j2
k = self._y[j1]
if k != -1:
self._x[k] = -1
self._x[i] = j1
self._y[j1] = i
i = k
if k == -1 or abs(u1 - u2) < self.epsilon:
break
def _update_cred(self):
"""
Updates the reduced costs with the values from the
dual solution
"""
ui = self.c[self._inds, self._x] - self._v[self._x]
self.cred = self.c - ui[:, None] - self._v[None, :]
def _augment(self):
"""
Finds a minimum cost path and adds it to the matching
"""
# build a minimum cost tree
_pred, _ready, istar, j, mu = self._build_tree()
# update prices
self._v[_ready] += self._d[_ready] - mu
# augment the solution with the minimum cost path from the
# tree. Follows an alternating path along matched, unmatched
# edges from X to Y
while True:
i = _pred[j]
self._y[j] = i
k = j
j = self._x[i]
self._x[i] = k
if i == istar:
break
self._update_cred()
def _build_tree(self):
"""
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
"""
# find unassigned i*
istar = np.argmin(self._x)
# compute distances
self._d = self.c[istar] - self._v
_pred = np.zeros(self.n, dtype=int) + istar
# initialize sets
# READY: set of nodes visited and in the path (whose price gets
# updated in augment)
# SCAN: set of nodes at the bottom of the tree, which we need to
# look at
# TODO: unvisited nodes
_ready = np.zeros(self.n, dtype=bool)
_scan = np.zeros(self.n, dtype=bool)
_todo = np.ones(self.n, dtype=bool)
while True:
# populate scan with minimum reduced distances
if True not in _scan:
mu = np.min(self._d[_todo])
_scan[self._d == mu] = True
_todo[_scan] = False
j = np.argmin(self._y * _scan)
if self._y[j] == -1 and _scan[j]:
return _pred, _ready, istar, j, mu
# pick jstar from scan (scan always has at least 1)
_jstar = np.argmax(_scan)
# pick i associated with jstar
i = self._y[_jstar]
_scan[_jstar] = False
_ready[_jstar] = True
# find shorter distances
newdists = mu + self.cred[i, :]
shorter = np.logical_and(newdists < self._d, _todo)
# update distances
self._d[shorter] = newdists[shorter]
# update predecessors
_pred[shorter] = i
for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
if self._y[j] == -1:
return _pred, _ready, istar, j, mu
_scan[j] = True
_todo[j] = False
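# Minimal self-check sketch (not part of the original module), using a small
# hypothetical cost matrix whose optimum is easy to verify by hand:
#   costs = [[4, 1, 3], [2, 0, 5], [3, 2, 2]]
#   la = LinearAssignment(costs)
#   la.solution  # row i -> column la.solution[i], e.g. [1, 0, 2]
#   la.min_cost  # 1 + 2 + 2 = 5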
|
fraricci/pymatgen
|
pymatgen/optimization/linear_assignment_numpy.py
|
Python
|
mit
| 8,160
|
[
"pymatgen"
] |
19f15a2a701339b4381348709ddeb333a2688bae209d4b46d8703ea8db37fae5
|
"""
Utilities to create replication transformations
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from DIRAC.TransformationSystem.Client.Transformation import Transformation
from DIRAC import gLogger, S_OK, S_ERROR
def createDataTransformation(
flavour,
targetSE,
sourceSE,
metaKey,
metaValue,
extraData=None,
extraname="",
groupSize=1,
plugin="Broadcast",
tGroup=None,
tBody=None,
enable=False,
):
"""Creates the replication transformation based on the given parameters.
:param str flavour: Flavour of replication to create: Replication or Moving
:param targetSE: Destination for files
:type targetSE: python:list or str
:param sourceSE: Origin of files
:type sourceSE: python:list or str
:param str metaKey: Meta key to identify input files
:param metaValue: Meta value to identify input files
:param dict extraData: Additional meta data to use to identify input files
:param str extraname: addition to the transformation name, only needed if the same transformation was already created
:param int groupSize: number of files per transformation task
:param str plugin: plugin to use
:param str tGroup: transformation group to set
:param tBody: transformation body to set
:param bool enable: if true submit the transformation, otherwise dry run
:returns: S_OK (with the transformation object, if successfully added), S_ERROR
"""
metadata = {metaKey: metaValue}
if isinstance(extraData, dict):
metadata.update(extraData)
gLogger.debug("Using %r for metadata search" % metadata)
if isinstance(targetSE, six.string_types):
targetSE = [targetSE]
if isinstance(sourceSE, (list, tuple)):
sourceSE = "%s" % (",".join(sourceSE))
gLogger.debug("Using plugin: %r" % plugin)
if flavour not in ("Replication", "Moving"):
return S_ERROR("Unsupported flavour %s" % flavour)
transVerb = {"Replication": "Replicate", "Moving": "Move"}[flavour]
transGroup = {"Replication": "Replication", "Moving": "Moving"}[flavour] if not tGroup else tGroup
trans = Transformation()
transName = "%s_%s_%s" % (transVerb, str(metaValue), ",".join(targetSE))
if extraname:
transName += "_%s" % extraname
trans.setTransformationName(transName)
description = "%s files for %s %s to %s" % (transVerb, metaKey, str(metaValue), ",".join(targetSE))
trans.setDescription(description[:255])
trans.setLongDescription(description)
trans.setType("Replication")
trans.setTransformationGroup(transGroup)
trans.setGroupSize(groupSize)
trans.setPlugin(plugin)
transBody = (
{
"Moving": [
("ReplicateAndRegister", {"SourceSE": sourceSE, "TargetSE": targetSE}),
("RemoveReplica", {"TargetSE": sourceSE}),
],
"Replication": "", # empty body
}[flavour]
if tBody is None
else tBody
)
trans.setBody(transBody)
trans.setInputMetaQuery(metadata)
if sourceSE:
res = trans.setSourceSE(sourceSE)
if not res["OK"]:
return S_ERROR("SourceSE not valid: %s" % res["Message"])
res = trans.setTargetSE(targetSE)
if not res["OK"]:
return S_ERROR("TargetSE not valid: %s" % res["Message"])
if not enable:
gLogger.always("Dry run, not creating transformation")
return S_OK()
res = trans.addTransformation()
if not res["OK"]:
return res
gLogger.verbose(res)
trans.setStatus("Active")
trans.setAgentType("Automatic")
gLogger.always("Successfully created replication transformation")
return S_OK(trans)
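# Minimal usage sketch (hypothetical storage elements and meta query; this is
# a dry run unless enable=True):
#   res = createDataTransformation(
#       flavour='Moving', targetSE='CERN-DST', sourceSE='DESY-SRM',
#       metaKey='Datatype', metaValue='DST', extraname='test', enable=False)
#   if not res['OK']:
#       gLogger.error(res['Message'])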
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/Utilities/ReplicationTransformation.py
|
Python
|
gpl-3.0
| 3,787
|
[
"DIRAC"
] |
faeb006fc7bc2dcb6d9574dca83d9177d4df080de0e3099af68dd0255f55912f
|
"""Models
This module documented some training models
+ Feedforward Neural Network (including ConvNets)
"""
from telaugesa.optimize import corrupt_input;
from telaugesa.optimize import apply_dropout;
class FeedForward(object):
"""Feedforward Neural Network model"""
def __init__(self,
layers=None,
dropout=None):
"""Initialize feedforward model
Parameters
----------
layers : list
list of layers
dropout : list
list of dropout masks
"""
self.layers=layers;
if dropout is None:
self.dropout=[];
else:
self.dropout=dropout;
def fprop(self,
X):
"""Forward propagation
Parameters
----------
X : matrix or 4D tensor
input samples, the size is (number of cases, in_dim)
Returns
-------
out : list
output list from each layer
"""
out=[];
level_out=X;
for k, layer in enumerate(self.layers):
if len(self.dropout)>0:
level_out=apply_dropout(level_out, self.dropout[k]);
level_out=layer.apply(level_out);
out.append(level_out);
return out;
@property
def params(self):
return [param for layer in self.layers if hasattr(layer, 'params') for param in layer.params];
class AutoEncoder(object):
"""AutoEncoder model for MLP layers
This model only checks the validity of auto-encoders;
the training is done by the FeedForward model.
"""
def __init__(self, layers=None):
"""Initialize AutoEncoder
Parameters
----------
layers : tuple
list of MLP layers
"""
self.layers=layers;
self.check();
def check(self):
"""Check the validity of an AutoEncoder
"""
assert self.layers[0].get_dim("input")==self.layers[-1].get_dim("output"), \
"Input dimension is not match to output dimension";
for layer in self.layers:
assert hasattr(layer, 'params'), \
"Layer doesn't have necessary parameters";
def fprop(self,
X,
corruption_level=None,
noise_type="binomial",
epoch=None,
decay_rate=1.):
"""Forward pass of auto-encoder
Parameters
----------
X : matrix
number of samples in (number of samples, dim of sample)
corruption_level : float
corruption_level on data
noise_type : string
type of noise: "binomial" or "gaussian"
Returns
-------
out : matrix
output list for each layer
"""
out=[];
if epoch is not None:
self.corruption_level=corruption_level*(epoch**(-decay_rate));
else:
self.corruption_level=corruption_level;
if self.corruption_level is None:
level_out=X;
else:
level_out=corrupt_input(X, self.corruption_level, noise_type);
for k, layer in enumerate(self.layers):
level_out=layer.apply(level_out);
out.append(level_out);
return out;
@property
def params(self):
return [param for layer in self.layers if hasattr(layer, 'params') for param in layer.params];
class ConvAutoEncoder(object):
"""Convolutional Auto-Encoder model"""
def __init__(self, layers):
"""Initialize ConvAE
Parameters
----------
layers : tuple
list of feedforward layers
"""
self.layers=layers;
self.check();
def check(self):
"""Checking the validity of a ConvAutoEncoder"""
assert self.layers[0].get_dim("input")==self.layers[-1].get_dim("output"), \
"Input dimension is not match to output dimension";
def fprop(self,
X,
corruption_level=None,
noise_type="binomial",
epoch=None,
decay_rate=1.):
"""Forward pass of convolutional auto-encoder
Parameters
----------
X : 4D tensor
data in (batch size, channel, height, width)
corruption_level : float
corruption_level on data
noise_type : string
type of noise: "binomial" or "gaussian"
Returns
-------
out : 4-D tensor
output list for each layer
"""
out=[];
if epoch is not None:
self.corruption_level=corruption_level*(epoch**(-decay_rate));
else:
self.corruption_level=corruption_level;
if self.corruption_level is None:
level_out=X;
else:
level_out=corrupt_input(X, self.corruption_level, noise_type);
for k, layer in enumerate(self.layers):
level_out=layer.apply(level_out);
out.append(level_out);
return out;
@property
def params(self):
return [param for layer in self.layers if hasattr(layer, 'params') for param in layer.params];
class ConvKMeans(object):
"""Convolutional K-means"""
def __init__(self, layers):
"""Init a Conv K-means model
Parameters
----------
layers : tuple of size 2
one conv layer, one arg-max pooling layer
"""
assert len(layers)==2, \
"Too many layers for Conv K-means";
self.layers=layers;
def get_layer(self):
"""Get trained convolution layer
"""
return self.layers[0];
def fprop(self, X):
"""Get activation map
Parameters
----------
X : matrix or 4D tensor
input samples, the size is (number of cases, in_dim)
Returns
-------
out : list
output list from each layer
"""
out=[];
level_out=X;
for k, layer in enumerate(self.layers):
level_out=layer.apply(level_out);
out.append(level_out);
return out;
|
duguyue100/telaugesa
|
telaugesa/model.py
|
Python
|
mit
| 6,718
|
[
"Gaussian"
] |
c95ebe83d749529dea8576dfb8abf5c03e43a22f2a48bf92a32736512f571a27
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
import unittest
from webtest import TestApp as _TestApp # avoid confusing py.test
from spyne.application import Application
from spyne.decorator import srpc
from spyne.service import Service
from spyne.model.primitive import Integer, Unicode
from spyne.model.complex import Iterable
from spyne.protocol.soap import Soap11
from spyne.protocol.http import HttpRpc
from spyne.protocol.json import JsonDocument
from spyne.server.wsgi import WsgiApplication
from spyne.const.xml import PREFMAP, NS_WSDL11_SOAP
def strip_whitespace(string):
return ''.join(string.split())
class TestOperationRequestSuffix(unittest.TestCase):
"""
test different protocols with REQUEST_SUFFIX and _operation_name
_in_message_name is a concern, will test that as well
"""
default_function_name = 'echo'
# output is not affected, will use soap output for all tests
result_body = '''
<soap11env:Body>
<tns:echoResponse>
<tns:echoResult>
<tns:string>Echo, test</tns:string>
<tns:string>Echo, test</tns:string>
</tns:echoResult>
</tns:echoResponse>
</soap11env:Body>'''
def get_function_names(self, suffix, _operation_name=None,
_in_message_name=None):
"""This tests the logic of how names are produced.
Its logic should match expected behavior of the decorator.
returns operation name, in message name, service name depending on
args"""
function_name = self.default_function_name
if _operation_name is None:
operation_name = function_name
else:
operation_name = _operation_name
if _in_message_name is None:
request_name = operation_name + suffix
else:
request_name = _in_message_name
return function_name, operation_name, request_name
def get_app(self, in_protocol, suffix, _operation_name=None,
_in_message_name=None):
"""setup testapp dependent on suffix and _in_message_name"""
import spyne.const
spyne.const.REQUEST_SUFFIX = suffix
class EchoService(Service):
srpc_kparams = {'_returns': Iterable(Unicode)}
if _in_message_name:
srpc_kparams['_in_message_name'] = _in_message_name
if _operation_name:
srpc_kparams['_operation_name'] = _operation_name
@srpc(Unicode, Integer, **srpc_kparams)
def echo(string, times):
for i in range(times):
yield 'Echo, %s' % string
application = Application([EchoService],
tns='spyne.examples.echo',
in_protocol=in_protocol,
out_protocol=Soap11()
)
app = WsgiApplication(application)
testapp = _TestApp(app)
# so that it doesn't interfere with other tests.
spyne.const.REQUEST_SUFFIX = ''
return testapp
def assert_response_ok(self, resp):
"""check the default response"""
self.assertEqual(resp.status_int, 200, resp)
self.assertTrue(
strip_whitespace(self.result_body) in strip_whitespace(str(resp)),
'{0} not in {1}'.format(self.result_body, resp))
### application error tests ###
def assert_application_error(self, suffix, _operation_name=None,
_in_message_name=None):
self.assertRaises(ValueError,
self.get_app, Soap11(validator='lxml'), suffix,
_operation_name, _in_message_name)
def test_assert_application_error(self):
"""check error when op namd and in name are both used"""
self.assert_application_error(suffix='',
_operation_name='TestOperationName',
_in_message_name='TestMessageName')
### soap tests ###
def assert_soap_ok(self, suffix, _operation_name=None,
_in_message_name=None):
"""helper to test soap requests"""
# setup
app = self.get_app(Soap11(validator='lxml'), suffix, _operation_name,
_in_message_name)
function_name, operation_name, request_name = self.get_function_names(
suffix, _operation_name, _in_message_name)
soap_input_body = """
<SOAP-ENV:Envelope xmlns:ns0="spyne.examples.echo"
xmlns:ns1="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Header/>
<ns1:Body>
<ns0:{0}>
<ns0:string>test</ns0:string>
<ns0:times>2</ns0:times>
</ns0:{0}>
</ns1:Body>
</SOAP-ENV:Envelope>""".format(request_name)
# check wsdl
wsdl = app.get('/?wsdl')
self.assertEqual(wsdl.status_int, 200, wsdl)
self.assertTrue(request_name in wsdl,
'{0} not found in wsdl'.format(request_name))
soap_strings = [
'<wsdl:operation name="{0}"'.format(operation_name),
'<{0}:operation soapAction="{1}"'.format(PREFMAP[NS_WSDL11_SOAP], operation_name),
'<wsdl:input name="{0}">'.format(request_name),
'<xs:element name="{0}"'.format(request_name),
'<xs:complexType name="{0}">'.format(request_name),
]
for soap_string in soap_strings:
self.assertTrue(soap_string in wsdl,
'{0} not in {1}'.format(soap_string, wsdl))
if request_name != operation_name:
wrong_string = '<wsdl:operation name="{0}"'.format(request_name)
self.assertFalse(wrong_string in wsdl,
'{0} in {1}'.format(wrong_string, wsdl))
output_name = '<wsdl:output name="{0}Response"'.format(
self.default_function_name)
self.assertTrue(output_name in wsdl,
'REQUEST_SUFFIX or _in_message_name changed the '
'output name, it should be: {0}'.format(
output_name))
# check soap operation succeeded
resp = app.post('/', soap_input_body,
content_type='application/xml; charset=utf8')
self.assert_response_ok(resp)
def test_soap_with_suffix(self):
self.assert_soap_ok(suffix='Request')
def test_soap_no_suffix(self):
self.assert_soap_ok(suffix='')
def test_soap_with_suffix_with_message_name(self):
self.assert_soap_ok(suffix='Request',
_in_message_name='TestInMessageName')
def test_soap_no_suffix_with_message_name(self):
self.assert_soap_ok(suffix='', _in_message_name='TestInMessageName')
def test_soap_with_suffix_with_operation_name(self):
self.assert_soap_ok(suffix='Request',
_operation_name='TestOperationName')
def test_soap_no_suffix_with_operation_name(self):
self.assert_soap_ok(suffix='', _operation_name='TestOperationName')
### json tests ###
def assert_json_ok(self, suffix, _operation_name=None,
_in_message_name=None):
"""helper to test json requests"""
# setup
app = self.get_app(JsonDocument(validator='soft'), suffix,
_operation_name, _in_message_name)
function_name, operation_name, request_name = self.get_function_names(
suffix, _operation_name, _in_message_name)
json_input_body = '{"' + request_name + '": {"string": "test", ' \
'"times": 2}}'
# check json operation succeeded
resp = app.post('/', json_input_body,
content_type='application/json; charset=utf8')
self.assert_response_ok(resp)
def test_json_with_suffix(self):
self.assert_json_ok(suffix='Request')
def test_json_no_suffix(self):
self.assert_json_ok(suffix='')
def test_json_with_suffix_with_message_name(self):
self.assert_json_ok(suffix='Request',
_in_message_name='TestInMessageName')
def test_json_no_suffix_with_message_name(self):
self.assert_json_ok(suffix='', _in_message_name='TestInMessageName')
def test_json_with_suffix_with_operation_name(self):
self.assert_json_ok(suffix='Request',
_operation_name='TestOperationName')
def test_json_no_suffix_with_operation_name(self):
self.assert_json_ok(suffix='', _operation_name='TestOperationName')
### HttpRpc tests ###
def assert_httprpc_ok(self, suffix, _operation_name=None,
_in_message_name=None):
"""Helper to test HttpRpc requests"""
# setup
app = self.get_app(HttpRpc(validator='soft'),
suffix, _operation_name, _in_message_name)
function_name, operation_name, request_name = \
self.get_function_names(suffix, _operation_name, _in_message_name)
url = "/{0}?string=test×=2".format(request_name)
# check httprpc operation succeeded
resp = app.get(url)
self.assert_response_ok(resp)
def test_httprpc_with_suffix(self):
self.assert_httprpc_ok(suffix='Request')
def test_httprpc_no_suffix(self):
self.assert_httprpc_ok(suffix='')
def test_httprpc_with_suffix_with_message_name(self):
self.assert_httprpc_ok(suffix='Request',
_in_message_name='TestInMessageName')
def test_httprpc_no_suffix_with_message_name(self):
self.assert_httprpc_ok(suffix='', _in_message_name='TestInMessageName')
def test_httprpc_with_suffix_with_operation_name(self):
self.assert_httprpc_ok(suffix='Request',
_operation_name='TestOperationName')
def test_httprpc_no_suffix_with_operation_name(self):
self.assert_httprpc_ok(suffix='', _operation_name='TestOperationName')
|
arskom/spyne
|
spyne/test/interface/wsdl/test_op_req_suffix.py
|
Python
|
lgpl-2.1
| 11,091
|
[
"NAMD"
] |
d7f4e36c8ec935e655723398269ceb03132c1038f09fcc5fd74368d12499c292
|
# Copyright 2013-2017, Brian May
#
# This file is part of python-alogger.
#
# python-alogger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-alogger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-alogger If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
import unittest
from .base import Base
class TestSGE(Base, unittest.TestCase):
file_prefix = "sge"
log_type = "SGE"
|
Karaage-Cluster/python-alogger
|
alogger/tests/test_sge.py
|
Python
|
gpl-3.0
| 900
|
[
"Brian"
] |
c5f615f6f31b3cdff3f87d946fe92370e442295cd5c5ae9d3aba74e9a6ab4f53
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""A writer for chemical JSON (CJSON) files."""
import os.path
import json
import numpy as np
from cclib.io import filewriter
from cclib.parser.data import ccData
from cclib.parser.utils import find_package
_has_openbabel = find_package("openbabel")
class CJSON(filewriter.Writer):
"""A writer for chemical JSON (CJSON) files."""
def __init__(self, ccdata, terse=False, *args, **kwargs):
"""Initialize the chemical JSON writer object.
Inputs:
ccdata - An instance of ccData, parsed from a logfile.
"""
super().__init__(ccdata, terse=terse, *args, **kwargs)
def pathname(self, path):
"""Return filename without extension to be used as name."""
name = os.path.basename(os.path.splitext(path)[0])
return name
def as_dict(self):
""" Build a Python dict with the CJSON data"""
cjson_dict = dict()
# Need to decide on a number format.
cjson_dict['chemical json'] = 0
if self.jobfilename is not None:
cjson_dict['name'] = self.pathname(self.jobfilename)
# These are properties that can be collected using Open Babel.
if _has_openbabel:
cjson_dict['smiles'] = self.pbmol.write('smiles')
cjson_dict['inchi'] = self.pbmol.write('inchi')
cjson_dict['inchikey'] = self.pbmol.write('inchikey')
cjson_dict['formula'] = self.pbmol.formula
# TODO Incorporate unit cell information.
# Iterate through the attribute list present in ccData. Depending on
# the availability of the attribute add it at the right 'level'.
for attribute_name, v in ccData._attributes.items():
if not hasattr(self.ccdata, attribute_name):
continue
attribute_path = v.attribute_path.split(":")
# Depth of the attribute in the CJSON.
levels = len(attribute_path)
# The attributes which haven't been included in the CJSON format.
if attribute_path[0] == 'N/A':
continue
if attribute_path[0] not in cjson_dict:
cjson_dict[attribute_path[0]] = dict()
l1_data_object = cjson_dict[attribute_path[0]]
# 'moments' and 'atomcoords' key will contain processed data
# obtained from the output file. TODO rewrite this
if attribute_name in ('moments', 'atomcoords'):
if attribute_name == 'moments':
dipole_moment = self._calculate_total_dipole_moment()
if dipole_moment is not None:
cjson_dict['properties'][ccData._attributes['moments'].json_key] = dipole_moment
else:
cjson_dict['atoms']['coords'] = dict()
cjson_dict['atoms']['coords']['3d'] = self.ccdata.atomcoords[-1].flatten().tolist()
continue
if levels == 1:
self.set_JSON_attribute(l1_data_object, attribute_name)
elif levels >= 2:
if attribute_path[1] not in l1_data_object:
l1_data_object[attribute_path[1]] = dict()
l2_data_object = l1_data_object[attribute_path[1]]
if levels == 2:
self.set_JSON_attribute(l2_data_object, attribute_name)
elif levels == 3:
if attribute_path[2] not in l2_data_object:
l2_data_object[attribute_path[2]] = dict()
l3_data_object = l2_data_object[attribute_path[2]]
self.set_JSON_attribute(l3_data_object, attribute_name)
# Attributes which are not directly obtained from the output files.
if hasattr(self.ccdata, 'moenergies') and hasattr(self.ccdata, 'homos'):
if 'energy' not in cjson_dict['properties']:
cjson_dict['properties']['energy'] = dict()
cjson_dict['properties']['energy']['alpha'] = dict()
cjson_dict['properties']['energy']['beta'] = dict()
homo_idx_alpha = int(self.ccdata.homos[0])
homo_idx_beta = int(self.ccdata.homos[-1])
energy_alpha_homo = self.ccdata.moenergies[0][homo_idx_alpha]
energy_alpha_lumo = self.ccdata.moenergies[0][homo_idx_alpha + 1]
energy_alpha_gap = energy_alpha_lumo - energy_alpha_homo
energy_beta_homo = self.ccdata.moenergies[-1][homo_idx_beta]
energy_beta_lumo = self.ccdata.moenergies[-1][homo_idx_beta + 1]
energy_beta_gap = energy_beta_lumo - energy_beta_homo
cjson_dict['properties']['energy']['alpha']['homo'] = energy_alpha_homo
cjson_dict['properties']['energy']['alpha']['gap'] = energy_alpha_gap
cjson_dict['properties']['energy']['beta']['homo'] = energy_beta_homo
cjson_dict['properties']['energy']['beta']['gap'] = energy_beta_gap
cjson_dict['properties']['energy']['total'] = self.ccdata.scfenergies[-1]
if hasattr(self.ccdata, 'atomnos'):
cjson_dict['atoms']['elements']['atom count'] = len(self.ccdata.atomnos)
cjson_dict['atoms']['elements']['heavy atom count'] = len([x for x in self.ccdata.atomnos if x > 1])
# Bond attributes:
if _has_openbabel and (len(self.ccdata.atomnos) > 1):
cjson_dict['bonds'] = dict()
cjson_dict['bonds']['connections'] = dict()
cjson_dict['bonds']['connections']['index'] = []
for bond in self.bond_connectivities:
cjson_dict['bonds']['connections']['index'].append(bond[0])
cjson_dict['bonds']['connections']['index'].append(bond[1])
cjson_dict['bonds']['order'] = [bond[2] for bond in self.bond_connectivities]
if _has_openbabel:
cjson_dict['properties']['molecular mass'] = self.pbmol.molwt
cjson_dict['diagram'] = self.pbmol.write(format='svg')
return cjson_dict
def generate_repr(self):
"""Generate the CJSON representation of the logfile data."""
cjson_dict = self.as_dict()
if self.terse:
return json.dumps(cjson_dict, cls=NumpyAwareJSONEncoder)
else:
return json.dumps(cjson_dict, cls=JSONIndentEncoder, sort_keys=True, indent=4)
def set_JSON_attribute(self, object, key):
"""
Args:
object: Python dictionary which is being appended with the key value.
key: cclib attribute name.
Returns:
None. The dictionary is modified to contain the attribute with the
cclib keyname as key
"""
if hasattr(self.ccdata, key):
object[ccData._attributes[key].json_key] = getattr(self.ccdata, key)
class NumpyAwareJSONEncoder(json.JSONEncoder):
"""A encoder for numpy.ndarray's obtained from the cclib attributes.
For all other types the json default encoder is called.
Do Not rename the 'default' method as it is required to be implemented
by any subclass of the json.JSONEncoder
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
if obj.ndim == 1:
nan_list = obj.tolist()
return [None if np.isnan(x) else x for x in nan_list]
else:
return [self.default(obj[i]) for i in range(obj.shape[0])]
return json.JSONEncoder.default(self, obj)
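# Minimal usage sketch: NaN entries in 1-D arrays serialize to JSON null.
#   json.dumps(np.array([1.5, np.nan]), cls=NumpyAwareJSONEncoder)
#   # -> '[1.5, null]'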
class JSONIndentEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_indent = 0
self.current_indent_str = ""
def encode(self, o):
# Special Processing for lists
if isinstance(o, (list, tuple)):
primitives_only = True
for item in o:
if isinstance(item, (list, tuple, dict)):
primitives_only = False
break
output = []
if primitives_only:
for item in o:
output.append(json.dumps(item, cls=NumpyAwareJSONEncoder))
return "[ " + ", ".join(output) + " ]"
else:
self.current_indent += self.indent
self.current_indent_str = "".join([" " for x in range(self.current_indent)])
for item in o:
output.append(self.current_indent_str + self.encode(item))
self.current_indent -= self.indent
self.current_indent_str = "".join([" " for x in range(self.current_indent)])
return "[\n" + ",\n".join(output) + "\n" + self.current_indent_str + "]"
elif isinstance(o, dict):
output = []
self.current_indent += self.indent
self.current_indent_str = "".join([" " for x in range(self.current_indent)])
for key, value in o.items():
output.append(self.current_indent_str + json.dumps(key, cls=NumpyAwareJSONEncoder) + ": " +
str(self.encode(value)))
self.current_indent -= self.indent
self.current_indent_str = "".join([" " for x in range(self.current_indent)])
return "{\n" + ",\n".join(output) + "\n" + self.current_indent_str + "}"
elif isinstance(o, np.generic):
return json.dumps(o.item(), cls=NumpyAwareJSONEncoder)
else:
return json.dumps(o, cls=NumpyAwareJSONEncoder)
del find_package
|
cclib/cclib
|
cclib/io/cjsonwriter.py
|
Python
|
bsd-3-clause
| 9,713
|
[
"Open Babel",
"cclib"
] |
12d319a890860d5e9cc59eea153208518f76863f700fe1d16f794f53817a677e
|
# $HeadURL$
""" Queries BDII for unknown CE.
Queries BDII for CE information and puts it to CS.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import ldapSite, ldapCluster, ldapCE, ldapCEState
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo, formatProxyInfoAsString
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
class CE2CSAgent( AgentModule ):
addressTo = ''
addressFrom = ''
voName = ''
subject = "CE2CSAgent"
alternativeBDIIs = []
def initialize( self ):
# TODO: Have no default and if no mail is found then use the diracAdmin group
# and resolve all associated mail addresses.
self.addressTo = self.am_getOption( 'MailTo', self.addressTo )
self.addressFrom = self.am_getOption( 'MailFrom', self.addressFrom )
# Create a list of alternative bdii urls
self.alternativeBDIIs = self.am_getOption( 'AlternativeBDIIs', [] )
# Check if the bdii url is appended by a port number, if not append the default 2170
for index, url in enumerate( self.alternativeBDIIs ):
if not url.split( ':' )[-1].isdigit():
self.alternativeBDIIs[index] += ':2170'
if self.addressTo and self.addressFrom:
self.log.info( "MailTo", self.addressTo )
self.log.info( "MailFrom", self.addressFrom )
if self.alternativeBDIIs :
self.log.info( "AlternativeBDII URLs:", self.alternativeBDIIs )
self.subject = "CE2CSAgent"
    # This sets the default proxy to the one defined under
    # /Operations/Shifter/TestManager.
    # The shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'TestManager' )
self.voName = self.am_getOption( 'VirtualOrganization', [] )
if not self.voName:
vo = getVO()
if vo:
self.voName = [ vo ]
if self.voName:
self.log.info( "Agent will manage VO(s) %s" % self.voName )
else:
self.log.fatal( "VirtualOrganization option not defined for agent" )
return S_ERROR()
self.csAPI = CSAPI()
return self.csAPI.initialize()
def execute( self ):
self.log.info( "Start Execution" )
result = getProxyInfo()
if not result['OK']:
return result
infoDict = result[ 'Value' ]
self.log.info( formatProxyInfoAsString( infoDict ) )
# Get a "fresh" copy of the CS data
result = self.csAPI.downloadCSData()
if not result['OK']:
self.log.warn( "Could not download a fresh copy of the CS data", result[ 'Message' ] )
self.__lookForCE()
self.__infoFromCE()
self.log.info( "End Execution" )
return S_OK()
  def __checkAlternativeBDIISite( self, fun, *args ):
    # Default result in case no alternative BDII is configured
    result = S_ERROR( "No alternative BDII site configured" )
    if self.alternativeBDIIs:
self.log.warn( "Trying to use alternative BDII sites" )
for site in self.alternativeBDIIs :
self.log.info( "Trying to contact alternative BDII", site )
if len( args ) == 1 :
result = fun( args[0], host = site )
elif len( args ) == 2 :
result = fun( args[0], vo = args[1], host = site )
if not result['OK'] :
self.log.error ( "Problem contacting alternative BDII", result['Message'] )
elif result['OK'] :
return result
self.log.warn( "Also checking alternative BDII sites failed" )
return result
def __lookForCE( self ):
knownCEs = self.am_getOption( 'BannedCEs', [] )
result = gConfig.getSections( '/Resources/Sites' )
if not result['OK']:
return
grids = result['Value']
for grid in grids:
result = gConfig.getSections( '/Resources/Sites/%s' % grid )
if not result['OK']:
return
sites = result['Value']
for site in sites:
opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
ces = List.fromChar( opt.get( 'CE', '' ) )
knownCEs += ces
response = ''
for vo in self.voName:
self.log.info( "Check for available CEs for VO", vo )
response = ldapCEState( '', vo )
if not response['OK']:
self.log.error( "Error during BDII request", response['Message'] )
response = self.__checkAlternativeBDIISite( ldapCEState, '', vo )
return response
newCEs = {}
for queue in response['Value']:
try:
queueName = queue['GlueCEUniqueID']
except:
continue
ceName = queueName.split( ":" )[0]
if not ceName in knownCEs:
newCEs[ceName] = None
self.log.debug( "New CE", ceName )
body = ""
possibleNewSites = []
for ce in newCEs.iterkeys():
response = ldapCluster( ce )
if not response['OK']:
self.log.warn( "Error during BDII request", response['Message'] )
response = self.__checkAlternativeBDIISite( ldapCluster, ce )
continue
clusters = response['Value']
if len( clusters ) != 1:
self.log.warn( "Error in cluster length", " CE %s Length %d" % ( ce, len( clusters ) ) )
if len( clusters ) == 0:
continue
cluster = clusters[0]
fkey = cluster.get( 'GlueForeignKey', [] )
if type( fkey ) == type( '' ):
fkey = [fkey]
nameBDII = None
for entry in fkey:
if entry.count( 'GlueSiteUniqueID' ):
nameBDII = entry.split( '=' )[1]
break
if not nameBDII:
continue
ceString = "CE: %s, GOCDB Name: %s" % ( ce, nameBDII )
self.log.info( ceString )
response = ldapCE( ce )
if not response['OK']:
self.log.warn( "Error during BDII request", response['Message'] )
response = self.__checkAlternativeBDIISite( ldapCE, ce )
continue
ceInfos = response['Value']
if len( ceInfos ):
ceInfo = ceInfos[0]
systemName = ceInfo.get( 'GlueHostOperatingSystemName', 'Unknown' )
systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' )
systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' )
else:
systemName = "Unknown"
systemVersion = "Unknown"
systemRelease = "Unknown"
osString = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease )
self.log.info( osString )
response = ldapCEState( ce, vo )
if not response['OK']:
self.log.warn( "Error during BDII request", response['Message'] )
response = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
continue
newCEString = "\n\n%s\n%s" % ( ceString, osString )
        useful = False
ceStates = response['Value']
for ceState in ceStates:
queueName = ceState.get( 'GlueCEUniqueID', 'UnknownName' )
queueStatus = ceState.get( 'GlueCEStateStatus', 'UnknownStatus' )
queueString = "%s %s" % ( queueName, queueStatus )
self.log.info( queueString )
newCEString += "\n%s" % queueString
if queueStatus.count( 'Production' ):
            useful = True
        if useful:
body += newCEString
possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % ( nameBDII, ce ) )
if body:
body = "We are glad to inform You about new CE(s) possibly suitable for %s:\n" % vo + body
body += "\n\nTo suppress information about CE add its name to BannedCEs list."
for possibleNewSite in possibleNewSites:
body = "%s\n%s" % ( body, possibleNewSite )
self.log.info( body )
if self.addressTo and self.addressFrom:
notification = NotificationClient()
result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )
return S_OK()
def __infoFromCE( self ):
sitesSection = cfgPath( 'Resources', 'Sites' )
result = gConfig.getSections( sitesSection )
if not result['OK']:
return
grids = result['Value']
changed = False
body = ""
for grid in grids:
gridSection = cfgPath( sitesSection, grid )
result = gConfig.getSections( gridSection )
if not result['OK']:
return
sites = result['Value']
for site in sites:
siteSection = cfgPath( gridSection, site )
opt = gConfig.getOptionsDict( siteSection )['Value']
name = opt.get( 'Name', '' )
if name:
coor = opt.get( 'Coordinates', 'Unknown' )
mail = opt.get( 'Mail', 'Unknown' )
result = ldapSite( name )
if not result['OK']:
self.log.warn( "BDII site %s: %s" % ( name, result['Message'] ) )
result = self.__checkAlternativeBDIISite( ldapSite, name )
if result['OK']:
bdiiSites = result['Value']
if len( bdiiSites ) == 0:
self.log.warn( name, "Error in BDII: leng = 0" )
else:
              if len( bdiiSites ) != 1:
                self.log.warn( name, "Warning in BDII: length = %d" % len( bdiiSites ) )
bdiiSite = bdiiSites[0]
try:
longitude = bdiiSite['GlueSiteLongitude']
latitude = bdiiSite['GlueSiteLatitude']
newcoor = "%s:%s" % ( longitude, latitude )
except:
self.log.warn( "Error in BDII coordinates" )
newcoor = "Unknown"
try:
newmail = bdiiSite['GlueSiteSysAdminContact'].split( ":" )[-1].strip()
except:
self.log.warn( "Error in BDII mail" )
newmail = "Unknown"
self.log.debug( "%s %s %s" % ( name, newcoor, newmail ) )
if newcoor != coor:
self.log.info( "%s" % ( name ), "%s -> %s" % ( coor, newcoor ) )
if coor == 'Unknown':
self.csAPI.setOption( cfgPath( siteSection, 'Coordinates' ), newcoor )
else:
self.csAPI.modifyValue( cfgPath( siteSection, 'Coordinates' ), newcoor )
changed = True
if newmail != mail:
self.log.info( "%s" % ( name ), "%s -> %s" % ( mail, newmail ) )
if mail == 'Unknown':
self.csAPI.setOption( cfgPath( siteSection, 'Mail' ), newmail )
else:
self.csAPI.modifyValue( cfgPath( siteSection, 'Mail' ), newmail )
changed = True
ceList = List.fromChar( opt.get( 'CE', '' ) )
if not ceList:
          self.log.warn( site, 'Empty CE list' )
continue
# result = gConfig.getSections( cfgPath( siteSection,'CEs' )
# if not result['OK']:
# self.log.debug( "Section CEs:", result['Message'] )
for ce in ceList:
ceSection = cfgPath( siteSection, 'CEs', ce )
result = gConfig.getOptionsDict( ceSection )
if not result['OK']:
self.log.debug( "Section CE", result['Message'] )
wnTmpDir = 'Unknown'
arch = 'Unknown'
os = 'Unknown'
si00 = 'Unknown'
pilot = 'Unknown'
ceType = 'Unknown'
else:
ceopt = result['Value']
wnTmpDir = ceopt.get( 'wnTmpDir', 'Unknown' )
arch = ceopt.get( 'architecture', 'Unknown' )
os = ceopt.get( 'OS', 'Unknown' )
si00 = ceopt.get( 'SI00', 'Unknown' )
pilot = ceopt.get( 'Pilot', 'Unknown' )
ceType = ceopt.get( 'CEType', 'Unknown' )
result = ldapCE( ce )
if not result['OK']:
self.log.warn( 'Error in BDII for %s' % ce, result['Message'] )
result = self.__checkAlternativeBDIISite( ldapCE, ce )
continue
try:
bdiiCE = result['Value'][0]
except:
self.log.warn( 'Error in BDII for %s' % ce, result )
bdiiCE = None
if bdiiCE:
try:
newWNTmpDir = bdiiCE['GlueSubClusterWNTmpDir']
except:
newWNTmpDir = 'Unknown'
if wnTmpDir != newWNTmpDir and newWNTmpDir != 'Unknown':
section = cfgPath( ceSection, 'wnTmpDir' )
self.log.info( section, " -> ".join( ( wnTmpDir, newWNTmpDir ) ) )
if wnTmpDir == 'Unknown':
self.csAPI.setOption( section, newWNTmpDir )
else:
self.csAPI.modifyValue( section, newWNTmpDir )
changed = True
try:
newArch = bdiiCE['GlueHostArchitecturePlatformType']
except:
newArch = 'Unknown'
if arch != newArch and newArch != 'Unknown':
section = cfgPath( ceSection, 'architecture' )
self.log.info( section, " -> ".join( ( arch, newArch ) ) )
if arch == 'Unknown':
self.csAPI.setOption( section, newArch )
else:
self.csAPI.modifyValue( section, newArch )
changed = True
try:
newOS = '_'.join( ( bdiiCE['GlueHostOperatingSystemName'],
bdiiCE['GlueHostOperatingSystemVersion'],
bdiiCE['GlueHostOperatingSystemRelease'] ) )
except:
newOS = 'Unknown'
if os != newOS and newOS != 'Unknown':
section = cfgPath( ceSection, 'OS' )
self.log.info( section, " -> ".join( ( os, newOS ) ) )
if os == 'Unknown':
self.csAPI.setOption( section, newOS )
else:
self.csAPI.modifyValue( section, newOS )
changed = True
body = body + "OS was changed %s -> %s for %s at %s\n" % ( os, newOS, ce, site )
try:
newSI00 = bdiiCE['GlueHostBenchmarkSI00']
except:
newSI00 = 'Unknown'
if si00 != newSI00 and newSI00 != 'Unknown':
section = cfgPath( ceSection, 'SI00' )
self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
if si00 == 'Unknown':
self.csAPI.setOption( section, newSI00 )
else:
self.csAPI.modifyValue( section, newSI00 )
changed = True
try:
rte = bdiiCE['GlueHostApplicationSoftwareRunTimeEnvironment']
for vo in self.voName:
if vo.lower() == 'lhcb':
if 'VO-lhcb-pilot' in rte:
newPilot = 'True'
else:
newPilot = 'False'
else:
newPilot = 'Unknown'
except:
newPilot = 'Unknown'
if pilot != newPilot and newPilot != 'Unknown':
section = cfgPath( ceSection, 'Pilot' )
self.log.info( section, " -> ".join( ( pilot, newPilot ) ) )
if pilot == 'Unknown':
self.csAPI.setOption( section, newPilot )
else:
self.csAPI.modifyValue( section, newPilot )
changed = True
newVO = ''
for vo in self.voName:
result = ldapCEState( ce, vo ) #getBDIICEVOView
if not result['OK']:
self.log.warn( 'Error in BDII for queue %s' % ce, result['Message'] )
result = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
continue
try:
queues = result['Value']
except:
              self.log.warn( 'Error in BDII for queue %s' % ce, result['Message'] )
continue
newCEType = 'Unknown'
for queue in queues:
try:
queueType = queue['GlueCEImplementationName']
except:
queueType = 'Unknown'
if newCEType == 'Unknown':
newCEType = queueType
else:
if queueType != newCEType:
self.log.warn( 'Error in BDII for CE %s ' % ce, 'different CE types %s %s' % ( newCEType, queueType ) )
if newCEType=='ARC-CE':
newCEType = 'ARC'
if ceType != newCEType and newCEType != 'Unknown':
section = cfgPath( ceSection, 'CEType' )
self.log.info( section, " -> ".join( ( ceType, newCEType ) ) )
if ceType == 'Unknown':
self.csAPI.setOption( section, newCEType )
else:
self.csAPI.modifyValue( section, newCEType )
changed = True
for queue in queues:
try:
queueName = queue['GlueCEUniqueID'].split( '/' )[-1]
except:
self.log.warn( 'Error in queueName ', queue )
continue
try:
newMaxCPUTime = queue['GlueCEPolicyMaxCPUTime']
except:
newMaxCPUTime = None
newSI00 = None
try:
caps = queue['GlueCECapability']
if type( caps ) == type( '' ):
caps = [caps]
for cap in caps:
if cap.count( 'CPUScalingReferenceSI00' ):
newSI00 = cap.split( '=' )[-1]
except:
newSI00 = None
queueSection = cfgPath( ceSection, 'Queues', queueName )
result = gConfig.getOptionsDict( queueSection )
if not result['OK']:
self.log.warn( "Section Queues", result['Message'] )
maxCPUTime = 'Unknown'
si00 = 'Unknown'
allowedVOs = ['']
else:
queueOpt = result['Value']
maxCPUTime = queueOpt.get( 'maxCPUTime', 'Unknown' )
si00 = queueOpt.get( 'SI00', 'Unknown' )
if newVO == '': # Remember previous iteration, if none - read from conf
allowedVOs = queueOpt.get( 'VO', '' ).split( "," )
else: # Else use newVO, as it can contain changes, which aren't in conf yet
allowedVOs = newVO.split( "," )
if newMaxCPUTime and ( maxCPUTime != newMaxCPUTime ):
section = cfgPath( queueSection, 'maxCPUTime' )
self.log.info( section, " -> ".join( ( maxCPUTime, newMaxCPUTime ) ) )
if maxCPUTime == 'Unknown':
self.csAPI.setOption( section, newMaxCPUTime )
else:
self.csAPI.modifyValue( section, newMaxCPUTime )
changed = True
if newSI00 and ( si00 != newSI00 ):
section = cfgPath( queueSection, 'SI00' )
self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
if si00 == 'Unknown':
self.csAPI.setOption( section, newSI00 )
else:
self.csAPI.modifyValue( section, newSI00 )
changed = True
modifyVO = True # Flag saying if we need VO option to change
newVO = ''
if allowedVOs != ['']:
for allowedVO in allowedVOs:
allowedVO = allowedVO.strip() # Get rid of spaces
newVO += allowedVO
if allowedVO == vo: # Current VO has been already in list
newVO = ''
modifyVO = False # Don't change anything
break # Skip next 'if', proceed to next VO
newVO += ', '
if modifyVO:
section = cfgPath( queueSection, 'VO' )
newVO += vo
self.log.info( section, " -> ".join( ( '%s' % allowedVOs, newVO ) ) )
if allowedVOs == ['']:
self.csAPI.setOption( section, newVO )
else:
self.csAPI.modifyValue( section, newVO )
changed = True
if changed:
self.log.info( body )
if body and self.addressTo and self.addressFrom:
notification = NotificationClient()
result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )
return self.csAPI.commit()
else:
self.log.info( "No changes found" )
return S_OK()
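
# --- Editor's sketch (illustrative, not part of DIRAC) ----------------------
# The fallback used by __checkAlternativeBDIISite above, reduced to a
# dependency-free helper: query the primary BDII host first, then each
# alternative in turn until one query returns OK.  All names here are
# hypothetical.
def _queryWithFallback( query, primaryHost, alternativeHosts ):
  result = query( primaryHost )
  for host in alternativeHosts:
    if result['OK']:
      break
    result = query( host )
  return result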
|
calancha/DIRAC
|
ConfigurationSystem/Agent/CE2CSAgent.py
|
Python
|
gpl-3.0
| 21,025
|
[
"DIRAC"
] |
863893d778521d580e21f589cd20d62bae5ec36383a3777eba3927fbbb8be2c7
|
# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~
Custom types and data structures.
"""
from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from collections import defaultdict
from itertools import chain
from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.limits import TokenBucket # noqa
from .utils.functional import LRUCache, first, uniq # noqa
class CycleError(Exception):
"""A cycle was detected in an acyclic graph."""
class DependencyGraph(object):
"""A directed acyclic graph of objects and their dependencies.
Supports a robust topological sort
to detect the order in which they must be handled.
Takes an optional iterator of ``(obj, dependencies)``
tuples to build the graph from.
.. warning::
Does not support cycle detection.
"""
def __init__(self, it=None):
self.adjacent = {}
if it is not None:
self.update(it)
def add_arc(self, obj):
"""Add an object to the graph."""
self.adjacent.setdefault(obj, [])
def add_edge(self, A, B):
"""Add an edge from object ``A`` to object ``B``
(``A`` depends on ``B``)."""
self[A].append(B)
def topsort(self):
"""Sort the graph topologically.
:returns: a list of objects in the order
in which they must be handled.
"""
graph = DependencyGraph()
components = self._tarjan72()
NC = dict((node, component)
for component in components
for node in component)
for component in components:
graph.add_arc(component)
for node in self:
node_c = NC[node]
for successor in self[node]:
successor_c = NC[successor]
if node_c != successor_c:
graph.add_edge(node_c, successor_c)
return [t[0] for t in graph._khan62()]
def valency_of(self, obj):
"""Returns the velency (degree) of a vertex in the graph."""
try:
l = [len(self[obj])]
except KeyError:
return 0
for node in self[obj]:
l.append(self.valency_of(node))
return sum(l)
def update(self, it):
"""Update the graph with data from a list
of ``(obj, dependencies)`` tuples."""
tups = list(it)
for obj, _ in tups:
self.add_arc(obj)
for obj, deps in tups:
for dep in deps:
self.add_edge(obj, dep)
def edges(self):
"""Returns generator that yields for all edges in the graph."""
return (obj for obj, adj in self.iteritems() if adj)
def _khan62(self):
"""Khans simple topological sort algorithm from '62
See http://en.wikipedia.org/wiki/Topological_sorting
"""
count = defaultdict(lambda: 0)
result = []
for node in self:
for successor in self[node]:
count[successor] += 1
ready = [node for node in self if not count[node]]
while ready:
node = ready.pop()
result.append(node)
for successor in self[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
result.reverse()
return result
def _tarjan72(self):
"""Tarjan's algorithm to find strongly connected components.
See http://bit.ly/vIMv3h.
"""
result, stack, low = [], [], {}
def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in self[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
stack[stack_pos:] = []
result.append(component)
for item in component:
low[item] = len(self)
for node in self:
visit(node)
return result
def to_dot(self, fh, ws=' ' * 4):
"""Convert the graph to DOT format.
:param fh: A file, or a file-like object to write the graph to.
"""
fh.write('digraph dependencies {\n')
for obj, adjacent in self.iteritems():
if not adjacent:
fh.write(ws + '"%s"\n' % (obj, ))
for req in adjacent:
fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
fh.write('}\n')
def __iter__(self):
return iter(self.adjacent)
def __getitem__(self, node):
return self.adjacent[node]
def __len__(self):
return len(self.adjacent)
def __contains__(self, obj):
return obj in self.adjacent
def _iterate_items(self):
return self.adjacent.iteritems()
items = iteritems = _iterate_items
def __repr__(self):
return '\n'.join(self.repr_node(N) for N in self)
def repr_node(self, obj, level=1):
output = ['%s(%s)' % (obj, self.valency_of(obj))]
if obj in self:
for other in self[obj]:
d = '%s(%s)' % (other, self.valency_of(other))
output.append(' ' * level + d)
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
return '\n'.join(output)
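
# --- Editor's sketch (illustrative, not part of celery) ---------------------
# Minimal use of DependencyGraph: an edge points from an object to something
# it depends on, and topsort() returns one object per strongly connected
# component, dependencies first.
if __name__ == '__main__':  # pragma: no cover
    graph = DependencyGraph([
        ('app', ['db', 'cache']),
        ('db', []),
        ('cache', []),
    ])
    # prints dependencies before dependents, e.g. ['db', 'cache', 'app']
    print(graph.topsort())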
class AttributeDictMixin(object):
"""Adds attribute access to mappings.
`d.key -> d[key]`
"""
def __getattr__(self, k):
"""`d.key -> d[key]`"""
try:
return self[k]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (type(self).__name__, k))
def __setattr__(self, key, value):
"""`d[key] = value -> d.key = value`"""
self[key] = value
class AttributeDict(dict, AttributeDictMixin):
"""Dict subclass with attribute access."""
pass
class DictAttribute(object):
"""Dict interface to attributes.
`obj[k] -> obj.k`
"""
obj = None
def __init__(self, obj):
object.__setattr__(self, 'obj', obj)
def __getattr__(self, key):
return getattr(self.obj, key)
def __setattr__(self, key, value):
return setattr(self.obj, key, value)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
setattr(self.obj, key, value)
def __contains__(self, key):
return hasattr(self.obj, key)
def _iterate_keys(self):
return iter(dir(self.obj))
iterkeys = _iterate_keys
def __iter__(self):
return self._iterate_keys()
def _iterate_items(self):
for key in self._iterate_keys():
yield key, getattr(self.obj, key)
iteritems = _iterate_items
if sys.version_info[0] == 3: # pragma: no cover
items = _iterate_items
keys = _iterate_keys
else:
def keys(self):
return list(self)
def items(self):
return list(self._iterate_items())
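
# --- Editor's sketch (illustrative, not part of celery) ---------------------
# DictAttribute bridges attribute-style objects and the dict protocol: keys
# are read and written as attributes of the wrapped object.
if __name__ == '__main__':  # pragma: no cover
    class _Conf(object):
        timezone = 'UTC'
    conf = DictAttribute(_Conf())
    conf['timezone'] = 'CET'      # sets the attribute on the wrapped instance
    print(conf.get('timezone'))   # -> CET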
class ConfigurationView(AttributeDictMixin):
"""A view over an applications configuration dicts.
If the key does not exist in ``changes``, the ``defaults`` dict
is consulted.
:param changes: Dict containing changes to the configuration.
:param defaults: Dict containing the default configuration.
"""
changes = None
defaults = None
_order = None
def __init__(self, changes, defaults):
self.__dict__.update(changes=changes, defaults=defaults,
_order=[changes] + defaults)
def add_defaults(self, d):
self.defaults.insert(0, d)
self._order.insert(1, d)
def __getitem__(self, key):
for d in self._order:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.changes[key] = value
def first(self, *keys):
return first(None, (self.get(key) for key in keys))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, *args, **kwargs):
return self.changes.update(*args, **kwargs)
def __contains__(self, key):
for d in self._order:
if key in d:
return True
return False
def __repr__(self):
return repr(dict(self.iteritems()))
def __iter__(self):
return self._iterate_keys()
def _iter(self, op):
# defaults must be first in the stream, so values in
        # changes take precedence.
return chain(*[op(d) for d in reversed(self._order)])
def _iterate_keys(self):
return uniq(self._iter(lambda d: d))
iterkeys = _iterate_keys
def _iterate_items(self):
return ((key, self[key]) for key in self)
iteritems = _iterate_items
def _iterate_values(self):
return (self[key] for key in self)
itervalues = _iterate_values
def keys(self):
return list(self._iterate_keys())
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
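
# --- Editor's sketch (illustrative, not part of celery) ---------------------
# ConfigurationView resolves reads against `changes` first, then each dict in
# `defaults`; writes always land in `changes`.
if __name__ == '__main__':  # pragma: no cover
    view = ConfigurationView({'timezone': 'CET'},
                             [{'timezone': 'UTC', 'debug': False}])
    print(view['timezone'])   # -> CET   (changes wins)
    print(view.debug)         # -> False (falls back to defaults)
    view['debug'] = True      # stored in changes only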
class LimitedSet(object):
"""Kind-of Set with limitations.
Good for when you need to test for membership (`a in set`),
    but the list might become too big, so you want to limit it so it doesn't
    consume too many resources.
:keyword maxlen: Maximum number of members before we start
evicting expired members.
:keyword expires: Time in seconds, before a membership expires.
"""
__slots__ = ('maxlen', 'expires', '_data', '__len__')
def __init__(self, maxlen=None, expires=None):
self.maxlen = maxlen
self.expires = expires
self._data = {}
self.__len__ = self._data.__len__
def add(self, value):
"""Add a new member."""
self._expire_item()
self._data[value] = time.time()
def clear(self):
"""Remove all members"""
self._data.clear()
def pop_value(self, value):
"""Remove membership by finding value."""
self._data.pop(value, None)
def _expire_item(self):
"""Hunt down and remove an expired item."""
while 1:
if self.maxlen and len(self) >= self.maxlen:
value, when = self.first
if not self.expires or time.time() > when + self.expires:
try:
self.pop_value(value)
except TypeError: # pragma: no cover
continue
break
def __contains__(self, value):
return value in self._data
def update(self, other):
if isinstance(other, self.__class__):
self._data.update(other._data)
else:
for obj in other:
self.add(obj)
def as_dict(self):
return self._data
def __iter__(self):
return iter(self._data)
def __repr__(self):
return 'LimitedSet(%r)' % (list(self._data), )
@property
def chronologically(self):
return sorted(self._data.items(), key=lambda (value, when): when)
@property
def first(self):
"""Get the oldest member."""
return self.chronologically[0]
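
# --- Editor's sketch (illustrative, not part of celery) ---------------------
# LimitedSet keeps at most `maxlen` members; once full, adding a new member
# evicts the oldest one (here immediately, since `expires` is left falsy).
if __name__ == '__main__':  # pragma: no cover
    seen = LimitedSet(maxlen=2)
    seen.add('a')
    seen.add('b')
    seen.add('c')             # evicts 'a', the oldest member
    print('a' in seen)        # -> False
    print(sorted(seen))       # -> ['b', 'c']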
|
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/datastructures.py
|
Python
|
bsd-3-clause
| 12,131
|
[
"VisIt"
] |
737af31fe4841f9f9ddaf7f76e9f75fb2490452b4c2951c5b49a695ba9b5f7fa
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import espressomd
import numpy as np
import os
import sys
import unittest as ut
from tests_common import abspath
@ut.skipIf(not espressomd.has_features("MEMBRANE_COLLISION", "OIF_LOCAL_FORCES", "OIF_GLOBAL_FORCES"), "OIF features not compiled in.")
class OifVolumeConservation(ut.TestCase):
"""Loads a soft elastic sphere via object_in_fluid, stretches it and checks resotration of original volume due to elastic forces."""
def test(self):
import object_in_fluid as oif
system = espressomd.System(box_l=(10, 10, 10))
self.assertEqual(system.max_oif_objects, 0)
system.time_step = 0.1
system.cell_system.skin = 0.5
system.thermostat.set_langevin(kT=0, gamma=0.7)
# creating the template for OIF object
cell_type = oif.OifCellType(nodes_file=abspath("data/sphere393nodes.dat"), triangles_file=abspath(
"data/sphere393triangles.dat"), system=system, ks=1.0, kb=1.0, kal=1.0, kag=0.1, kv=0.1, check_orientation=False, resize=(3.0, 3.0, 3.0))
# creating the OIF object
cell0 = oif.OifCell(
cell_type=cell_type, particle_type=0, origin=[5.0, 5.0, 5.0])
self.assertEqual(system.max_oif_objects, 1)
# cell0.output_vtk_pos_folded(file_name="cell0_0.vtk")
# fluid
diameter_init = cell0.diameter()
print("initial diameter = " + str(diameter_init))
# OIF object is being stretched by factor 1.5
maxCycle = 500
system.part[:].pos = (system.part[:].pos - 5) * 1.5 + 5
diameter_stretched = cell0.diameter()
print("stretched diameter = " + str(diameter_stretched))
# main integration loop
# OIF object is let to relax into relaxed shape of the sphere
for i in range(3):
system.integrator.run(steps=300)
diameter_final = cell0.diameter()
print("final diameter = " + str(diameter_final))
self.assertAlmostEqual(
diameter_final / diameter_init - 1, 0, delta=0.005)
if __name__ == "__main__":
# print("Features: ", espressomd.features())
ut.main()
|
hmenke/espresso
|
testsuite/python/oif_volume_conservation.py
|
Python
|
gpl-3.0
| 2,842
|
[
"ESPResSo",
"VTK"
] |
13c6293cae008b9aede4f67eebac2dbde356a31fef89aced2e1330a2b2864281
|
import unittest
from pymatgen.core import Structure
from veidt.monte_carlo.base import StateDict, StaticState
from veidt.monte_carlo.state import AtomNumberState, IsingState
from veidt.monte_carlo.state import SpinStructure, Chain
import os
file_path = os.path.dirname(__file__)
def unequal_site_number(list1, list2):
return sum([i != j for i, j in zip(list1, list2)])
class TestMonteCarlo(unittest.TestCase):
def test_ising_state(self):
ising_state = IsingState([0, 1, 0, 1])
new_ising_state = ising_state.copy()
self.assertListEqual(ising_state.state, new_ising_state.state)
self.assertEqual(ising_state, IsingState([0, 1, 0, 1], 'ising2'))
self.assertEqual(ising_state.n, 4)
self.assertListEqual(ising_state.state, [0, 1, 0, 1])
self.assertEqual(ising_state.name, 'ising')
ising_state.change()
self.assertEqual(unequal_site_number(ising_state.state, [0, 1, 0, 1]), 1)
def test_atom_number_state(self):
atom_number = AtomNumberState(10)
self.assertEqual(atom_number.state, 10)
atom_number.change()
self.assertIn(atom_number.state, [9, 11])
def test_spin_structure(self):
species_map = {0: 'K', 1: 'Na'}
structure = Structure.from_file(os.path.join(file_path, 'test_NaCoO2.cif'))
state_dict = StateDict([StaticState(100, 'temperature'),
AtomNumberState(10),
IsingState([0]*22+[1, 1])])
spin_struct = SpinStructure(structure, state_dict, species_map)
self.assertListEqual(spin_struct.state_dict['ising'].state, [0] * 22 + [1, 1])
orig_specie_list = spin_struct.to_specie_list()
# test move method
spin_struct = SpinStructure(structure, state_dict, species_map)
spin_struct.change()
self.assertEqual(unequal_site_number(spin_struct.state_dict['ising'].state, [0] * 22 + [1, 1]), 1)
specie_list = spin_struct.to_specie_list()
self.assertEqual(unequal_site_number(orig_specie_list, specie_list), 1)
# test from_states
spin_struct.from_states(
StateDict([StaticState(1000, 'temperature'),
AtomNumberState(10), IsingState([0]*20+[1, 1] + [0, 0])]))
self.assertEqual(unequal_site_number(spin_struct.to_specie_list(), orig_specie_list), 4)
self.assertEqual(unequal_site_number(spin_struct.to_states()['ising'].state, [0]*22 + [1, 1]), 4)
# test structure to states
self.assertListEqual(spin_struct.structure_to_states(structure)['ising'].state,
[0] * 22 + [1, 1])
def test_chain(self):
spin_state = IsingState([0, 1, 0])
atom_state = AtomNumberState(10)
state_dict = StateDict([spin_state, atom_state])
chain = Chain()
chain.append(state_dict)
chain.append(StateDict([AtomNumberState(20), IsingState([1, 1, 1])]))
self.assertListEqual(chain.chain['ising'][0], [0, 1, 0])
self.assertListEqual(chain.chain['ising'][1], [1, 1, 1])
self.assertListEqual(chain.chain['atom_number'], [10, 20])
self.assertIs(spin_state._chain, chain)
self.assertEqual(spin_state._chain.length, 2)
if __name__ == '__main__':
unittest.main()
|
materialsvirtuallab/veidt
|
veidt/monte_carlo/tests/test_states.py
|
Python
|
bsd-3-clause
| 3,311
|
[
"pymatgen"
] |
a7b91100050cb09537b7f17f54de4faaceee93ef21fd69beea64b2ccf371e1c5
|
#!/usr/bin/env python
# tgraph - a python based program to plot 1D and 2D data files
# Copyright (C) 2015 Wolfgang Tichy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
# check python version
if sys.version_info[0] < 3:
from Tkinter import *
if sys.version_info[1] > 6:
from ttk import * # overrides some tkinter stuff
import tkFileDialog as filedialog
else:
from tkinter import *
from tkinter.ttk import * # overrides some tkinter stuff
import tkinter.filedialog as filedialog
# for 2d
import matplotlib as mpl
# for 3d
from mpl_toolkits.mplot3d import axes3d,Axes3D
from matplotlib import cm
# for tkinter
mpl.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
try:
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk as NavToolbar
except ImportError:
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg as NavToolbar
#from matplotlib.figure import Figure
import numpy as np
# my data classes
import tdata
######################################################################
# tgraph version number
tgraph_version = "1.8"
print('tgraph', tgraph_version)
# get dir where tdata.py is found, and construct filename of tgraph.txt
import os
tdata_dir = os.path.dirname(tdata.__file__)
tgraph_txt_file = os.path.join(tdata_dir, 'tgraph.txt')
######################################################################
# default cols:
xcol = 0
ycol = 1
zcol = 2
vcol = 1
#print('Default cols:', xcol+1,ycol+1,zcol+1,':', vcol+1)
print('Default cols:', xcol+1,ycol+1,':', vcol+1)
# default stride
graph_stride = 1
######################################################################
# load data from command line arguments
filelist = tdata.tFileList()
argvs = sys.argv[1:]
print('Trying to open files:')
gotC = 0
gotx = 0
goty = 0
gotv = 0
gots = 0
gott = 0
timelabel_str = 'time'
got_xrange = 0
got_yrange = 0
got_vrange = 0
openCBrack = 0
inCBrack = 0
openSBrack = 0
inSBrack = 0
endSBrack = 0
for argv in argvs:
# check for new -c opt
pos = argv.find('-c')
if pos == 0:
gotC = 1
continue
# check for new -x opt
pos = argv.find('-x')
if pos == 0:
gotx = 1
continue
# check for new -y opt
pos = argv.find('-y')
if pos == 0:
goty = 1
continue
# check for new -v opt
pos = argv.find('-v')
if pos == 0:
gotv = 1
continue
# check for new -s opt
pos = argv.find('-s')
if pos == 0:
gots = 1
continue
# check for new -t opt
pos = argv.find('-t')
if pos == 0:
gott = 1
continue
# check for -m opt
pos = argv.find('-m')
if pos == 0:
mpl.rcParams['lines.marker'] = 'o'
continue
# did we get -c opt?
if gotC == 1:
cols = argv.split(':')
xcol = int(cols[0])-1
if len(cols)==2:
vcol = int(cols[1])-1
if len(cols)==3:
ycol = int(cols[1])-1
vcol = int(cols[2])-1
# print('cols:', xcol+1,ycol+1,zcol+1,':', vcol+1)
print('cols:', xcol+1,ycol+1,':', vcol+1)
gotC = 0
continue
# did we get -x, -y or -v opts?
if gotx == 1 or goty == 1 or gotv == 1:
vrange = argv.split(':')
#print(vrange)
if gotx == 1:
graph_xmin = float(vrange[0])
graph_xmax = float(vrange[1])
got_xrange = 1
if goty == 1:
graph_ymin = float(vrange[0])
graph_ymax = float(vrange[1])
got_yrange = 1
if gotv == 1:
graph_vmin = float(vrange[0])
graph_vmax = float(vrange[1])
got_vrange = 1
gotx = 0
goty = 0
gotv = 0
continue
# did we get -s opts?
if gots == 1:
graph_stride = int(argv)
gots = 0
continue
# did we get -t opts?
if gott == 1:
timelabel_str = str(argv).lower()
gott = 0
continue
# check for brackets, is there a '{' or a '}'
pos = argv.find('{')
if pos == 0:
openCBrack = 1
inCBrack = 0
continue
pos = argv.find('}')
if pos == 0:
openCBrack = 0
inCBrack = 0
continue
# check for brackets, is there a '[' or a ']'
pos = argv.find('[')
if pos == 0:
openSBrack = 1
inSBrack = 0
endSBrack = 0
continue
pos = argv.find(']')
if pos == 0:
openSBrack = 0
inSBrack = 0
endSBrack = 1
# no continue here
if endSBrack == 1:
endSBrack = 0
else:
# if we get here, there was no opt, so we have a filename
filelist.add(argv, timelabel_str)
# print('cols:', xcol+1,ycol+1,zcol+1,':', vcol+1)
print(filelist.file[-1].filename)
#for tf in filelist.file[-1].data.timeframes:
# print('blocks =', tf.blocks)
# set cols for the last file added
filelist.file[-1].data.set_cols(xcol=xcol, ycol=ycol, zcol=2, vcol=vcol)
# are we in a [ ] block so that we have to append a file?
if inSBrack == 1:
filelist.append_file_i2_to_i1(-2, -1)
continue
if openSBrack == 1:
inSBrack = 1
openSBrack = 0
continue
# are we in a { } block so that we have to merge files?
if inCBrack == 1:
# print(filelist.file)
filelist.merge_file_i2_into_i1(-2, -1)
if openCBrack == 1:
inCBrack = 1
openCBrack = 0
#for f in filelist.file:
# print("timeframes of", f.name, "after merge")
# for tf in f.data.timeframes:
# print(tf.time)
# print(tf.data)
if len(filelist.file) == 0:
print('no files given on command line\n')
print('Purpose of tgraph.py:')
print('We can show and animate data from files that contain multiple timeframes.')
print('Each timeframe consists of a timelabel and a number of data columns. E.g.:')
print('# time = 1.0')
print('1 2')
print('2 5')
print('3 10\n')
print('Usage:')
print('tgraph.py [-c 1:2[:3]] File1 File2 ... { FileX FileF } ... [ f_t1 f_t2 ]\n')
print('Options:')
print('-c specifies which columns to select')
print('{ } one can add columns from different files by enclosing files in { }')
print('[ ] one can add timeframes from different files by enclosing files in [ ]')
print('-x , -y , -v specify x-, y-, value-ranges, format is: -v vmin:vmax')
print('-t specifies timelabel, default is: -t time')
print('-s specifies stride (or step size) used to sample input data')
print('-m mark each point\n')
print('Examples:')
print('# select cols 1,2 in file1 and cols 1,4 of file2,file3 added together:')
print('tgraph.py -c 1:2 file1 -c 1:4 { file2 file3 }')
print('# select cols 1,2,4 from t1.vtk,t2.vtk,t3.vtk that contain one timeframe each:')
print('tgraph.py -s 10 -c 1:2:4 [ t1.vtk t2.vtk t3.vtk ]')
print('# select x- and value-ranges for data in file1 and mark points:')
print('tgraph.py -x 1:5 -v 0:2 -m file1')
# exit(1)
######################################################################
# root window for app
root = Tk()
root.wm_title("tgraph")
######################################################################
# init global dictionaries
# dictionaries with labels and legend
graph_labelsOn = 0
graph_labels = {}
graph_labels['title'] = ''
graph_labels['x-axis'] = ''
graph_labels['y-axis'] = ''
graph_labels['v-axis'] = ''
graph_labels['fontsize'] = mpl.rcParams['font.size']
graph_labels['timeformat'] = '%g'
graph_legendOn = 0
graph_legend = {}
graph_legend['fontsize'] = mpl.rcParams['font.size']
graph_legend['loc'] = 'upper right'
graph_legend['fancybox'] = mpl.rcParams['legend.fancybox']
graph_legend['shadow'] = mpl.rcParams['legend.shadow']
if mpl.__version__ > '1.4.2':
graph_legend['frameon'] = mpl.rcParams['legend.frameon']
graph_legend['framealpha'] = mpl.rcParams['legend.framealpha']
graph_legend['handlelength'] = mpl.rcParams['legend.handlelength']
# dictionary with settings for graph
graph_settings = {}
if mpl.__version__ >= '1.4.2':
graph_settings['colormap'] = 'coolwarm'
else:
graph_settings['colormap'] = 'jet'
graph_settings['linewidth'] = mpl.rcParams['lines.linewidth']
graph_settings['antialiased'] = 0
graph_settings['shade'] = 1
graph_settings['edgecolor'] = 'none'
# dictionary with settings for graph, where we need to setup axes after change
graph_limits = {}
# dictionaries with lines colors, styles, markers and widths
graph_linecolors = {}
graph_linestyles = {}
graph_linemarkers = {}
graph_linewidths = {}
# dictionaries with transformations
graph_coltrafos = {}
######################################################################
# functions needed early
# function to add file by # to global dictionaries
def set_graph_globals_for_file_i(filelist, i):
global graph_legend
global graph_linecolors
global graph_linestyles
global graph_linemarkers
global graph_linewidths
global graph_coltrafos
f = filelist.file[i]
graph_legend['#'+str(i)] = f.name
# can we use axes.prop_cycle ?
if mpl.__version__ < '1.5.1': # use axes.color_cycle below Matplotlib 1.5.1
if mpl.__version__ < '1.4.2':
color_cycle = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']
else:
color_cycle = mpl.rcParams['axes.color_cycle']
ncolors = len(color_cycle)
graph_linecolors['#'+str(i)] = color_cycle[i%ncolors]
else: # use axes.prop_cycle for all other versions
cycle_list = list(mpl.rcParams['axes.prop_cycle'])
ncolors = len(cycle_list)
graph_linecolors['#'+str(i)] = cycle_list[i%ncolors]['color']
graph_linestyles['#'+str(i)] = '-'
marker = mpl.rcParams['lines.marker']
graph_linemarkers['#'+str(i)] = marker
graph_linewidths['#'+str(i)] = ''
graph_coltrafos['#'+str(i)] = ''
# specify a file graphically
def open_file():
global filelist
global xcol
global ycol
global vcol
global timelabel_str
global graph_timelist
fname = filedialog.askopenfilename(title='Enter Data File Name')
if len(fname) == 0: # if user presses cancel fname is () or '', so exit
return
filelist.add(fname, timelabel_str)
# print('cols:', xcol+1,ycol+1,zcol+1,':', vcol+1)
i = len(filelist.file)-1
print(filelist.file[i].filename)
# set cols for the last file added
filelist.file[i].data.set_cols(xcol=xcol, ycol=ycol, zcol=2, vcol=vcol)
set_graph_globals_for_file_i(filelist, i)
# update time min and max
graph_timelist = filelist.get_timelist()
######################################################################
# no file was given on command line, ask for one now
if len(filelist.file) == 0:
open_file()
if len(filelist.file) == 0:
# print('\nNo files found!')
exit(1)
# add all files to to global dictionaries
for i in range(0, len(filelist.file)):
set_graph_globals_for_file_i(filelist, i)
######################################################################
# set global vars
graph_time = filelist.mintime()
graph_timelist = filelist.get_timelist()
graph_timeindex = tdata.geti_from_t(graph_timelist, graph_time)
graph_delay = 1
if got_xrange != 1:
graph_xmin = tdata.inf_to_1e300(filelist.minx())
graph_xmax = tdata.inf_to_1e300(filelist.maxx())
if got_yrange != 1:
graph_ymin = tdata.inf_to_1e300(filelist.miny())
graph_ymax = tdata.inf_to_1e300(filelist.maxy())
#if got_zrange != 1:
#graph_zmin = tdata.inf_to_1e300(filelist.minz())
#graph_zmax = tdata.inf_to_1e300(filelist.maxz())
if got_vrange != 1:
graph_vmin = tdata.inf_to_1e300(filelist.minv())
graph_vmax = tdata.inf_to_1e300(filelist.maxv())
graph_3dOn = 0
graph_axis_on = 1
graph_plot_surface = 0
graph_plot_scatter = 0
graph_clear_on_replot = 1
graph_plot_closest_t = 1
graph_plot_grid = 1
graph_colormap = getattr(cm, str(graph_settings['colormap']))
######################################################################
# add some global vars to dictionaries
graph_settings['stride'] = graph_stride
graph_limits['xmin'] = graph_xmin
graph_limits['xmax'] = graph_xmax
graph_limits['ymin'] = graph_ymin
graph_limits['ymax'] = graph_ymax
graph_limits['vmin'] = graph_vmin
graph_limits['vmax'] = graph_vmax
######################################################################
# functions
# setup ax for either 2d or 3d plots
def setup_axes(fig, graph_3dOn, ax=None):
  if ax is not None:
fig.delaxes(ax)
if graph_3dOn == 0:
# make 2d ax to plot graph
ax = fig.add_subplot(111)
ax.set_xlim(graph_xmin, graph_xmax)
ax.set_ylim(graph_vmin, graph_vmax)
else:
# make 3d ax to plot graph
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(graph_xmin, graph_xmax)
ax.set_ylim(graph_ymin, graph_ymax)
ax.set_zlim(graph_vmin, graph_vmax)
return ax
# plot into ax at time t
def axplot2d_at_time(filelist, canvas, ax, t):
global graph_clear_on_replot
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xscale = ax.get_xscale()
yscale = ax.get_yscale()
if graph_clear_on_replot == 1:
ax.clear()
ct = graph_plot_closest_t
for i in range(0, len(filelist.file)):
f = filelist.file[i]
if graph_plot_scatter == 1:
mark=str(graph_linemarkers['#'+str(i)])
if mark == '' or mark == 'None':
mark='o'
ax.scatter(f.data.getx(t,ct), f.data.getv(t,ct), label=f.name,
color=graph_linecolors['#'+str(i)], marker=mark)
elif str(graph_linewidths['#'+str(i)]) == '':
ax.plot(f.data.getx(t,ct), f.data.getv(t,ct), label=f.name,
color=graph_linecolors['#'+str(i)],
linestyle=graph_linestyles['#'+str(i)],
marker=graph_linemarkers['#'+str(i)])
else:
ax.plot(f.data.getx(t,ct), f.data.getv(t,ct), label=f.name,
color=graph_linecolors['#'+str(i)],
linewidth=float(graph_linewidths['#'+str(i)]),
linestyle=graph_linestyles['#'+str(i)],
marker=graph_linemarkers['#'+str(i)])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
if graph_labelsOn == 1:
ax.set_xlabel(graph_labels['x-axis'], fontsize=graph_labels['fontsize'])
ax.set_ylabel(graph_labels['v-axis'], fontsize=graph_labels['fontsize'])
ax.set_title(graph_labels['title'])
tf = graph_labels['timeformat']
if len(tf) > 0:
tstr = tf % t
ax.set_title(tstr, loc='right')
if graph_legendOn == 1:
ax.legend(fontsize=graph_legend['fontsize'], loc=graph_legend['loc'])
if graph_plot_grid == 1:
ax.grid(True)
# plot into ax at time t, in 3d
def axplot3d_at_time(filelist, canvas, ax, t):
global graph_stride
global graph_colormap
global graph_clear_on_replot
xlim = ax.get_xlim()
ylim = ax.get_ylim()
zlim = ax.get_zlim()
xscale = ax.get_xscale()
yscale = ax.get_yscale()
zscale = ax.get_zscale()
if graph_clear_on_replot == 1:
ax.clear()
ct = graph_plot_closest_t
for i in range(0, len(filelist.file)):
f = filelist.file[i]
blocks = f.data.getblocks(t)
if blocks < 2 and graph_plot_surface == 1:
print('3D plot will work only with wireframe, because input data had no empty lines.')
reshaper = (blocks, -1)
x=np.reshape(f.data.getx(t,ct), reshaper)
y=np.reshape(f.data.gety(t,ct), reshaper)
v=np.reshape(f.data.getv(t,ct), reshaper)
#print(x,y,v)
if graph_plot_surface == 1:
if str(graph_settings['colormap']) == '':
ax.plot_surface(x,y, v, rstride=graph_stride,cstride=graph_stride,
label=f.name, color=graph_linecolors['#'+str(i)],
antialiased=int(graph_settings['antialiased']),
shade=int(graph_settings['shade']),
linewidth=float(graph_settings['linewidth']),
edgecolor=graph_settings['edgecolor'])
else:
ax.plot_surface(x,y, v, rstride=graph_stride,cstride=graph_stride,
label=f.name, cmap=graph_colormap,
antialiased=int(graph_settings['antialiased']),
shade=int(graph_settings['shade']),
linewidth=float(graph_settings['linewidth']),
edgecolor=graph_settings['edgecolor'])
else:
if graph_plot_scatter == 1:
mark=str(graph_linemarkers['#'+str(i)])
if mark == '' or mark == 'None':
mark='o'
ax.scatter(x,y, v, label=f.name,
color=graph_linecolors['#'+str(i)], marker=mark)
else:
ax.plot_wireframe(x,y, v, rstride=graph_stride,cstride=graph_stride,
label=f.name, color=graph_linecolors['#'+str(i)])
# this does not seem to work in 3d:
#ax.set_xlim(xlim)
#ax.set_ylim(ylim)
#ax.set_zlim(zlim)
#ax.set_xscale(xscale)
#ax.set_yscale(yscale)
#ax.set_zscale(zscale)
ax.set_xlim(graph_xmin, graph_xmax)
ax.set_ylim(graph_ymin, graph_ymax)
ax.set_zlim(graph_vmin, graph_vmax)
if graph_labelsOn == 1:
ax.set_xlabel(graph_labels['x-axis'], fontsize=graph_labels['fontsize'])
ax.set_ylabel(graph_labels['y-axis'], fontsize=graph_labels['fontsize'])
ax.set_zlabel(graph_labels['v-axis'], fontsize=graph_labels['fontsize'])
ax.set_title(graph_labels['title'])
tf = graph_labels['timeformat']
if len(tf) > 0:
tstr = tf % t
ax.set_title(tstr, loc='right')
# Note: legend does not work for surface. Is matplotlib broken???
if graph_legendOn == 1 and graph_plot_surface == 0:
ax.legend(fontsize=graph_legend['fontsize'], loc=graph_legend['loc'])
def replot():
global filelist
global canvas
global ax
global graph_time
global graph_3dOn
global graph_axis_on
if graph_3dOn == 0:
axplot2d_at_time(filelist, canvas, ax, graph_time)
else:
axplot3d_at_time(filelist, canvas, ax, graph_time)
# print(ax.xaxis.tick_top())
if graph_axis_on == 0:
ax.set_axis_off()
canvas.draw()
# callbacks for some events
def update_graph_time_entry():
global tentry
global graph_time
tentry.delete(0, END)
tentry.insert(0, str(graph_time))
replot()
def draw_legend():
ax.legend(fontsize=10) # ax.legend() # (fontsize=8)
canvas.draw()
def toggle_log_xscale():
if ax.get_xscale() == 'linear':
ax.set_xscale('log')
else:
ax.set_xscale('linear')
canvas.draw()
def toggle_log_yscale():
if ax.get_yscale() == 'linear':
ax.set_yscale('log')
else:
ax.set_yscale('linear')
canvas.draw()
def toggle_plot_grid():
global graph_plot_grid
if graph_plot_grid == 1:
graph_plot_grid = 0
else:
graph_plot_grid = 1
replot()
def toggle_axis_on():
global graph_axis_on
if graph_axis_on == 1:
graph_axis_on = 0
else:
graph_axis_on = 1
replot()
def toggle_2d_3d():
global fig
global graph_3dOn
global ax
if graph_3dOn == 1:
graph_3dOn = 0
else:
graph_3dOn = 1
ax = setup_axes(fig, graph_3dOn, ax)
replot()
def toggle_wireframe_surface():
global fig
global graph_3dOn
global ax
global graph_plot_surface
global graph_plot_scatter
if graph_plot_surface == 1:
graph_plot_surface = 0
else:
graph_plot_surface = 1
# ax = setup_axes(fig, graph_3dOn, ax)
replot()
def toggle_wireframe_scatter():
global fig
global graph_3dOn
global ax
global graph_plot_surface
global graph_plot_scatter
if graph_plot_surface == 1:
graph_plot_scatter = 0
graph_plot_surface = 0
if graph_plot_scatter == 1:
graph_plot_scatter = 0
else:
graph_plot_scatter = 1
# ax = setup_axes(fig, graph_3dOn, ax)
replot()
def toggle_labels():
global graph_labelsOn
if graph_labelsOn == 1:
graph_labelsOn = 0
else:
graph_labelsOn = 1
replot()
def toggle_legend():
global graph_legendOn
if graph_legendOn == 1:
graph_legendOn = 0
else:
graph_legendOn = 1
replot()
def toggle_clear_on_replot():
global graph_clear_on_replot
if graph_clear_on_replot == 1:
graph_clear_on_replot = 0
else:
graph_clear_on_replot = 1
replot()
def toggle_plot_closest_t():
global graph_plot_closest_t
if graph_plot_closest_t == 1:
graph_plot_closest_t = 0
else:
graph_plot_closest_t = 1
replot()
def BT1_callback(event):
print("clicked at", event.x, event.y)
def not_implemented():
print("not implemented yet!")
def set_graph_time(event, ent):
global graph_time
global graph_timeindex
global graph_timelist
t = ent.get()
graph_timeindex = tdata.geti_from_t(graph_timelist, float(t))
graph_time = graph_timelist[graph_timeindex]
replot()
def set_graph_delay(event, ent):
global graph_delay
graph_delay = float(ent.get())
def min_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
graph_timeindex = 0
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
def max_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
graph_timeindex = len(graph_timelist)-1
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
def inc_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
if graph_timeindex<len(graph_timelist)-1:
graph_timeindex += 1
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
def dec_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
if graph_timeindex>0:
graph_timeindex -= 1
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
def play_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
inc_graph_time()
if graph_timeindex<len(graph_timelist)-1:
play_id = root.after(int(graph_delay), play_graph_time)
else:
play_id = root.after(int(graph_delay), update_graph_time_entry)
def cancel_callback(event):
root.after_cancel(play_id)
root.bind("<Button-1>", cancel_callback)
def start_play_graph_time():
global graph_time
global graph_timeindex
global graph_timelist
i1 = graph_timeindex
if i1>=len(graph_timelist)-1:
i1 = 0
graph_timeindex = i1
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
root.after(int(graph_delay), play_graph_time)
def open_and_plot_file():
open_file()
replot()
def save_movieframes():
global graph_time
global graph_timeindex
global graph_timelist
global fig
# get filename
fname = filedialog.asksaveasfilename(initialfile='frame.png',
title='Enter base movie frame name with extension')
if len(fname) == 0: # if user presses cancel fname is () or '', so exit
return
p = fname.rfind('.')
if p >= 0:
ext = fname[p:]
base = fname[:p]
else:
ext = ''
base = fname
# first and last time index
i1 = 0
i2 = len(graph_timelist)
# format (something like '%.6d') we use to print time index into filename
fmt = '%.'
fmt += '%d' % int( np.log10(i2)+1 )
fmt += 'd'
# loop over time indices
for graph_timeindex in range(i1, i2):
graph_time = graph_timelist[graph_timeindex]
update_graph_time_entry()
tstr = fmt % graph_timeindex
name = base + '_' + tstr + ext
canvas.print_figure(name)
if graph_timeindex == i1:
name1 = name
movie_message(name1, name)
def movie_message(name1, name2):
top1 = Tk()
top1.wm_title("Movie Frame Info")
  msg = ' Movie Frames have been saved in the files: \n'
  msg += ' ' + name1 + ' ... ' + name2 + ' \n'
  l1 = Label(master=top1, text=msg)
l1.pack(side=TOP, expand=1)
button = Button(top1, text="Close", command=top1.destroy)
button.pack(side=TOP)
top1.mainloop()
def about():
top1 = Tk()
top1.wm_title("About tgraph")
str = " tgraph " + tgraph_version + " \n\n"
str += " Produce quick 2D or 3D graphs from files given on the command line. \n"
str += " Read the file tgraph.txt for help. \n\n"
str += " Copyright (C) 2015 Wolfgang Tichy. \n"
l1 = Label(master=top1, text=str)
l1.pack(side=TOP, expand=1)
button = Button(top1, text="Close", command=top1.destroy)
button.pack(side=TOP)
top1.mainloop()
def help():
global tgraph_txt_file
top1 = Tk()
top1.wm_title("tgraph help")
str = " tgraph.txt for tgraph " + tgraph_version + ","
str += " Copyright (C) 2015 Wolfgang Tichy. "
l1 = Label(master=top1, text=str)
l1.pack(side=TOP, expand=1)
scrollbar = Scrollbar(master=top1)
scrollbar.pack(side=RIGHT, fill=Y)
text = Text(master=top1, wrap=WORD, yscrollcommand=scrollbar.set)
tgraph_txt = "file tgraph.txt not found!"
with open(tgraph_txt_file, 'r') as f:
tgraph_txt = f.read()
text.insert("1.0", tgraph_txt)
# text.config(state=DISABLED) # no editing in text window
text.pack()
scrollbar.config(command=text.yview)
button = Button(top1, text="Close", command=top1.destroy)
button.pack(side=TOP)
top1.mainloop()
# simple dialog that opens window where we can enter values for a dictionary
class WTdialog:
# init input form, form is a dict. containing labels and values
def __init__(self, title, formdict):
self.input = {} # init input dict.
self.Entry = {} # init tk Entry dict.
self.top = Toplevel(root) # root is parent and can now wait for self.top
self.top.wm_title(title)
f0 = Frame(master=self.top)
f0.pack(side=TOP, expand=1)
row = 0
for key in sorted(formdict):
label = key
entry = formdict[key]
l1 = Label(master=f0, text=label)
l1.grid(row=row, column=0)
e1 = Entry(master=f0, width=80)
e1.grid(row=row, column=1)
e1.delete(0, END)
e1.insert(0, entry)
# make duplicate of formdict and save tk Entry objects
self.input[label] = entry
self.Entry[label] = e1
row += 1
# add "Apply" button
button = Button(self.top, text="Apply", command=self.apply_changes)
button.pack(side=TOP)
# wait for window self.top
root.wait_window(self.top)
def get_input_values(self):
for key in self.input:
self.input[key] = self.Entry[key].get()
def apply_changes(self):
self.get_input_values()
#self.top.quit()
self.top.destroy()
# use WTdialog to reset some limits
def input_graph_limits():
global fig
global graph_3dOn
global ax
global graph_limits # dict. with options
global graph_xmin
global graph_xmax
global graph_ymin
global graph_ymax
global graph_vmin
global graph_vmax
# get graph_limits
dialog = WTdialog("tgraph Limits", graph_limits)
# now get the user input back
graph_limits = dialog.input
graph_xmin = float(graph_limits['xmin'])
graph_xmax = float(graph_limits['xmax'])
graph_ymin = float(graph_limits['ymin'])
graph_ymax = float(graph_limits['ymax'])
graph_vmin = float(graph_limits['vmin'])
graph_vmax = float(graph_limits['vmax'])
# change axes and then plot again
ax = setup_axes(fig, graph_3dOn, ax)
replot()
# set graph_limits dict. from graph_xmin, graph_xmax, ...
def set_graph_limits():
global graph_limits # dict. with options
global graph_xmin
global graph_xmax
global graph_ymin
global graph_ymax
global graph_vmin
global graph_vmax
graph_limits['xmin'] = graph_xmin
graph_limits['xmax'] = graph_xmax
graph_limits['ymin'] = graph_ymin
graph_limits['ymax'] = graph_ymax
graph_limits['vmin'] = graph_vmin
graph_limits['vmax'] = graph_vmax
# use WTdialog to set xcols
def input_graph_xcolumns():
global filelist
global graph_xmin
global graph_xmax
global fig
global graph_3dOn
global ax
xcoldict = {}
for i in range(0, len(filelist.file)):
xcoldict['#'+str(i)] = filelist.file[i].data.get_xcol0()+1
dialog = WTdialog("tgraph x-Column", xcoldict)
xcoldict = dialog.input
for i in range(0, len(filelist.file)):
filelist.file[i].data.set_xcols(int(xcoldict['#'+str(i)])-1)
graph_xmin = tdata.inf_to_1e300(filelist.minx())
graph_xmax = tdata.inf_to_1e300(filelist.maxx())
set_graph_limits()
print('(xmin, xmax) =', '(', graph_xmin, ',', graph_xmax, ')')
ax = setup_axes(fig, graph_3dOn, ax)
replot()
# use WTdialog to set ycols
def input_graph_ycolumns():
global filelist
global graph_ymin
global graph_ymax
global fig
global graph_3dOn
global ax
ycoldict = {}
for i in range(0, len(filelist.file)):
ycoldict['#'+str(i)] = filelist.file[i].data.get_ycol0()+1
dialog = WTdialog("tgraph y-Column", ycoldict)
ycoldict = dialog.input
for i in range(0, len(filelist.file)):
filelist.file[i].data.set_ycols(int(ycoldict['#'+str(i)])-1)
graph_ymin = tdata.inf_to_1e300(filelist.miny())
graph_ymax = tdata.inf_to_1e300(filelist.maxy())
set_graph_limits()
print('(ymin, ymax) =', '(', graph_ymin, ',', graph_ymax, ')')
ax = setup_axes(fig, graph_3dOn, ax)
replot()
# use WTdialog to set vcols
def input_graph_vcolumns():
global filelist
global graph_vmin
global graph_vmax
global fig
global graph_3dOn
global ax
vcoldict = {}
for i in range(0, len(filelist.file)):
vcoldict['#'+str(i)] = filelist.file[i].data.get_vcol0()+1
dialog = WTdialog("tgraph v-Column", vcoldict)
vcoldict = dialog.input
for i in range(0, len(filelist.file)):
filelist.file[i].data.set_vcols(int(vcoldict['#'+str(i)])-1)
graph_vmin = tdata.inf_to_1e300(filelist.minv())
graph_vmax = tdata.inf_to_1e300(filelist.maxv())
set_graph_limits()
print('(vmin, vmax) =', '(', graph_vmin, ',', graph_vmax, ')')
ax = setup_axes(fig, graph_3dOn, ax)
replot()
# use WTdialog to reset some settings
def input_graph_settings():
global fig
global graph_3dOn
global ax
global graph_settings # dict. with options
global graph_colormap
global graph_stride
# get graph_settings
dialog = WTdialog("tgraph Settings", graph_settings)
# now get the user input back
graph_settings = dialog.input
mpl.rcParams['lines.linewidth'] = graph_settings['linewidth']
  if str(graph_settings['colormap']) != '':
    graph_colormap = getattr(cm, str(graph_settings['colormap']))
graph_stride = int(graph_settings['stride'])
# change axes and then plot again
replot()
# use WTdialog to reset some labels
def input_graph_labels():
global graph_labelsOn
global graph_labels # dict. with options
# get graph_labels
dialog = WTdialog("tgraph Labels", graph_labels)
# now get the user input back
graph_labels = dialog.input
mpl.rcParams['font.size'] = graph_labels['fontsize']
# since we edited the labels switch them on now
graph_labelsOn = 1
replot()
# use WTdialog to reset legend
def input_graph_legend():
global filelist
global graph_legendOn
global graph_legend # dict. with options
# for legend
dialog = WTdialog("tgraph Legend", graph_legend)
graph_legend = dialog.input
# save names
for i in range(0, len(filelist.file)):
f = filelist.file[i]
f.name = graph_legend['#'+str(i)]
    # Check what loc holds: it can be a location string, an int code,
    # or a coordinate pair.
    s = graph_legend['loc']
    if s.isdigit():
        graph_legend['loc'] = int(s)
    else:
        pos = s.find(',')
        if pos >= 0:
            for ch in '[]()':
                s = s.replace(ch, ' ')
            coords = s.split(',')
            graph_legend['loc'] = [float(coords[0]), float(coords[1])]
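    # Example (illustrative): a loc entry of "7" becomes the int 7; a string
    # such as "(0.6, 0.25)" is stripped of brackets and parsed to [0.6, 0.25];
    # anything else, e.g. "upper right", is passed to matplotlib unchanged.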
# set some things in mpl.rcParams
mpl.rcParams['legend.fancybox'] = graph_legend['fancybox']
mpl.rcParams['legend.shadow'] = graph_legend['shadow']
mpl.rcParams['legend.frameon'] = graph_legend['frameon']
mpl.rcParams['legend.framealpha'] = graph_legend['framealpha']
mpl.rcParams['legend.handlelength'] = graph_legend['handlelength']
# since we edited the legend switch it on now
graph_legendOn = 1
replot()
# use WTdialog to reset legend
def edit_mpl_rcParams():
dialog = WTdialog("mpl.rcParams", mpl.rcParams)
    # update in place; rebinding mpl.rcParams to a plain dict would lose
    # matplotlib's validation
    mpl.rcParams.update(dialog.input)
replot()
# use WTdialog to reset line colors
def input_graph_linecolors():
global filelist
global graph_linecolors # dict. with options
dialog = WTdialog("tgraph Line Colors", graph_linecolors)
graph_linecolors = dialog.input
replot()
# use WTdialog to reset line colors
def input_graph_linestyles():
global filelist
global graph_linestyles # dict. with options
dialog = WTdialog("tgraph Line Styles", graph_linestyles)
graph_linestyles = dialog.input
replot()
# use WTdialog to reset line markers
def input_graph_linemarkers():
global filelist
global graph_linemarkers # dict. with options
dialog = WTdialog("tgraph Line Markers", graph_linemarkers)
graph_linemarkers = dialog.input
replot()
# use WTdialog to reset line widths
def input_graph_linewidths():
global filelist
global graph_linewidths # dict. with options
dialog = WTdialog("tgraph Line Widths", graph_linewidths)
graph_linewidths = dialog.input
replot()
# use WTdialog to do transformations on columns
def input_graph_coltrafos():
global filelist
global graph_coltrafos # dict. with trafos
dialog = WTdialog(
"tgraph Column Transformations, "
"e.g. c[3] = 2*c[2] + sin(t) + D(c[2])/D(c[1])",
graph_coltrafos)
graph_coltrafos = dialog.input
# print(graph_coltrafos)
for i in range(0, len(filelist.file)):
f = filelist.file[i]
trafo = str(graph_coltrafos['#'+str(i)])
if trafo == '':
continue
else:
print("transform", '#'+str(i)+':', trafo)
f.data.transform_col(trafo, c_index_shift=1)
replot()
######################################################################
# except for root window all tk stuff follows below
######################################################################
# make menu bar
menubar = Menu(root)
# create a pulldown menu, and add it to the menu bar
filemenu = Menu(menubar, tearoff=0)
#filemenu.add_command(label="Open", command=not_implemented)
filemenu.add_command(label="Open File", command=open_and_plot_file)
filemenu.add_command(label="Save Movie Frames", command=save_movieframes)
#filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.destroy)
menubar.add_cascade(label="File", menu=filemenu)
# create more pulldown menus
optionsmenu = Menu(menubar, tearoff=0)
optionsmenu.add_command(label="Toggle Timeframe update/add", command=toggle_clear_on_replot)
optionsmenu.add_command(label="Toggle Timeframe closest/exact", command=toggle_plot_closest_t)
optionsmenu.add_command(label="Toggle Axis on/off", command=toggle_axis_on)
optionsmenu.add_command(label="Toggle log/lin x", command=toggle_log_xscale)
optionsmenu.add_command(label="Toggle log/lin y", command=toggle_log_yscale)
optionsmenu.add_command(label="Toggle Grid on/off", command=toggle_plot_grid)
optionsmenu.add_command(label="Toggle Line/Scatter",
command=toggle_wireframe_scatter)
optionsmenu.add_command(label="Toggle 2D/3D", command=toggle_2d_3d)
optionsmenu.add_command(label="Toggle 3D-Surface",
command=toggle_wireframe_surface)
optionsmenu.add_command(label="Toggle Labels", command=toggle_labels)
optionsmenu.add_command(label="Toggle Legend", command=toggle_legend)
#optionsmenu.add_command(label="Show Legend", command=draw_legend)
menubar.add_cascade(label="Options", menu=optionsmenu)
settingsmenu = Menu(menubar, tearoff=0)
settingsmenu.add_command(label="Select x-Columns", command=input_graph_xcolumns)
settingsmenu.add_command(label="Select y-Columns", command=input_graph_ycolumns)
settingsmenu.add_command(label="Select v-Columns", command=input_graph_vcolumns)
settingsmenu.add_command(label="Edit Limits", command=input_graph_limits)
settingsmenu.add_command(label="Edit Labels", command=input_graph_labels)
settingsmenu.add_command(label="Edit Legend", command=input_graph_legend)
settingsmenu.add_command(label="Graph Settings", command=input_graph_settings)
#settingsmenu.add_command(label="Edit rcParams", command=edit_mpl_rcParams)
menubar.add_cascade(label="Settings", menu=settingsmenu)
linesmenu = Menu(menubar, tearoff=0)
linesmenu.add_command(label="Edit Line Colors", command=input_graph_linecolors)
linesmenu.add_command(label="Edit Line Styles", command=input_graph_linestyles)
linesmenu.add_command(label="Edit Line Markers", command=input_graph_linemarkers)
linesmenu.add_command(label="Edit Line Widths", command=input_graph_linewidths)
menubar.add_cascade(label="Lines", menu=linesmenu)
transformationsmenu = Menu(menubar, tearoff=0)
transformationsmenu.add_command(label="Transform Columns",
command=input_graph_coltrafos)
menubar.add_cascade(label="Transformations", menu=transformationsmenu)
# create help pulldown menu
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=about)
helpmenu.add_command(label="Read help in tgraph.txt", command=help)
menubar.add_cascade(label="Help", menu=helpmenu)
# display the menu
root.config(menu=menubar)
# make a frame where we put time step controls
topframe = Frame(root)
topframe.pack(side=TOP, expand=0)
# entries for time
tl = Label(master=topframe, text="Time")
tl.pack(side=LEFT, expand=0)
tentry = Entry(master=topframe, width=22)
tentry.bind('<Return>', lambda event, ent=tentry: set_graph_time(event, ent))
tentry.bind('<Deactivate>', lambda event, ent=tentry: set_graph_time(event, ent))
tentry.pack(side=LEFT, expand=1)
tentry.delete(0, END)
tentry.insert(0, graph_time)
# add buttons for player
sb = Button(master=topframe, text='<<', width=3, command=min_graph_time)
sb.pack(side=LEFT, expand=1)
bb = Button(master=topframe, text='<', width=3, command=dec_graph_time)
bb.pack(side=LEFT, expand=1)
pb = Button(master=topframe, text='Play', width=5, command=start_play_graph_time)
pb.pack(side=LEFT, expand=1)
fb = Button(master=topframe, text='>', width=3, command=inc_graph_time)
fb.pack(side=LEFT, expand=1)
eb = Button(master=topframe, text='>>', width=3, command=max_graph_time)
eb.pack(side=LEFT, expand=1)
# entries for delay
dl = Label(master=topframe, text=" Delay")
dl.pack(side=LEFT, expand=1)
de = Entry(master=topframe, width=4)
de.bind('<Return>', lambda event, ent=de: set_graph_delay(event, ent))
de.bind('<Leave>', lambda event, ent=de: set_graph_delay(event, ent))
de.pack(side=LEFT, expand=1)
de.delete(0, END)
de.insert(0, "1")
######################################################################
# make figure fig
fig = mpl.figure.Figure(figsize=(7.25, 7), dpi=85)
# Use matplotlib to make a tk.DrawingArea of fig and show it.
# This needs to come before making ax by: ax = Axes3D(fig)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
# setup the axes
ax = setup_axes(fig, graph_3dOn, None)
# make matplotlib toolbar
toolbar = NavToolbar(canvas, root)
toolbar.update()
#canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
replot()
######################################################################
#print('times =', graph_timelist)
print('(tmin, tmax) =', '(', filelist.mintime(), ',', filelist.maxtime(), ')')
print('(xmin, xmax) =', '(', graph_xmin, ',', graph_xmax, ')')
print('(ymin, ymax) =', '(', graph_ymin, ',', graph_ymax, ')')
print('(vmin, vmax) =', '(', graph_vmin, ',', graph_vmax, ')')
######################################################################
# go into tkinter's main loop and wait for events
root.mainloop()
|
wofti/tgraph
|
tgraph.py
|
Python
|
gpl-3.0
| 39,501
|
[
"VTK"
] |
52def3ca22ed93a5e2c029dfa315006a5bc2971319fec79e85e4dd39d70406bb
|
'''
Neuromuscular simulator in Python.
Copyright (C) 2017 Renato Naville Watanabe
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact: renato.watanabe@usp.br
'''
import math
#from numba import jit
import numpy as np
#@jit
def compValOn(v0, alpha, beta, t, t0):
'''
Time course of the state during the pulse for the *inactivation* states
and before and after the pulse for the *activation* states.
The value of the state \f$v\f$ is computed according to the following
equation:
\f{equation}{
v(t) = v_0\exp[-\beta(t-t_0)]
\f}
where \f$t_0\f$ is the time at which the pulse changed
the value (on to off or off to on) and \f$v_0\f$ is value
of the state at that time.
'''
return v0 * np.exp(beta * (t0 - t))
#@jit
def compValOff(v0, alpha, beta, t, t0):
'''
Time course of the state during the pulse for the *activation* states
and before and after the pulse for the *inactivation* states.
The value of the state \f$v\f$ is computed according to the following
equation:
\f{equation}{
v(t) = 1 + (v_0 - 1)\exp[-\alpha(t-t_0)]
\f}
where \f$t_0\f$ is the time at which the pulse changed
the value (on to off or off to on) and \f$v_0\f$ is value
of the state at that time.
'''
return 1.0 + (v0 - 1.0) * np.exp(alpha * (t0 - t))
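# Example (illustrative, not part of the original module): with t0 = 0,
# compValOff(0.0, alpha, beta, t, 0.0) rises as 1 - exp(-alpha*t) toward 1,
# while compValOn(1.0, alpha, beta, t, 0.0) decays as exp(-beta*t) toward 0:
# the two halves of the pulse approximation.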
class PulseConductanceState(object):
'''
Implements the Destexhe pulse approximation of the solution of
the states of the Hodgkin-Huxley neuron model.
'''
def __init__(self, kind, conf, pool, neuronKind, compKind, index):
'''
Initializes the pulse conductance state.
Variables:
+ **kind**: string with type of the state (m, h, n, q).
+ **conf**: an instance of the Configuration class with the functions to correctly parameterize the model. See the Configuration class.
        + **pool**: string with the pool that this state belongs to.
        + **neuronKind**: string with the type of the motor unit. It is used for
            motoneurons. It can be *S* (slow), *FR* (fast and resistant), or *FF*
            (fast and fatigable).
        + **compKind**: the kind of compartment that the channel belongs to.
            For now, it can be *soma*, *dendrite*, *node* or *internode*.
        + **index**: the index of the unit that this state belongs to.
'''
self.kind = kind
self.value = float(0)
self.state = False
self.beta_ms1 = float(conf.parameterSet('beta_' + kind + ':' + pool + '-' + neuronKind + '@' + compKind, pool, index))
        self.alpha_ms1 = float(conf.parameterSet('alpha_' + kind + ':' + pool + '-' + neuronKind + '@' + compKind, pool, index))
self.PulseDur_ms = float(conf.parameterSet('PulseDur_' + kind, pool, index))
self.AlphaExp = math.exp(-self.alpha_ms1 * conf.timeStep_ms)
self.BetaExp = math.exp(-self.beta_ms1 * conf.timeStep_ms)
self.endOfPulse_ms = self.PulseDur_ms
        # m, n, q, mp and s are activation gates; h and qh are inactivation gates
        if self.kind in ('m', 'n', 'q', 'mp', 's'):
            self.actType = 'activation'
        elif self.kind in ('h', 'qh'):
            self.actType = 'inactivation'
if (self.actType == 'activation'):
self.computeStateValue = self.computeStateValueActivation
else:
self.computeStateValue = self.computeStateValueInactivation
def changeState(self, t):
'''
        Void function that toggles the current situation (true/false)
        of the state.
- Inputs:
+ **t**: current instant, in ms.
'''
self.state = not self.state
self.endOfPulse_ms = self.PulseDur_ms + t
#@profile
def computeStateValueActivation(self, t):
'''
Compute the state value by using the approximation of Destexhe (1997) to
compute the Hodgkin-Huxley states of *activation* type.
- Input:
+ **t**: current instant, in ms.
The value of the state \f$v\f$ is computed according to the following
equation before and after the pulse:
\f{equation}{
v(t) = v_0\exp[-\beta(t-t_0)]
\f}
and according to the following equation during the pulse:
\f{equation}{
v(t) = 1 + (v_0 - 1)\exp[-\alpha(t-t_0)]
\f}
where \f$t_0\f$ is the time at which the pulse changed
the value (on to off or off to on) and \f$v_0\f$ is value
of the state at that time.
'''
if not self.state:
self.value *= self.BetaExp
else:
if t > self.endOfPulse_ms:
self.changeState(t)
self.value *= self.BetaExp
else:
self.value = (self.value - 1) * self.AlphaExp + 1
#@profile
def computeStateValueInactivation(self, t):
'''
Compute the state value by using the approximation of Destexhe (1997) to
compute the Hodgkin-Huxley states of *inactivation* type.
- Input:
+ **t**: current instant, in ms.
The value of the state \f$v\f$ is computed according to the following
equation before and after the pulse:
\f{equation}{
v(t) = v_0\exp[-\beta(t-t_0)]
\f}
and according to the following equation during the pulse:
\f{equation}{
v(t) = 1 + (v_0 - 1)\exp[-\alpha(t-t_0)]
\f}
where \f$t_0\f$ is the time at which the pulse changed
the value (on to off or off to on) and \f$v_0\f$ is value
of the state at that time.
'''
if not self.state:
self.value = (self.value - 1) * self.AlphaExp + 1
else:
if t > self.endOfPulse_ms:
self.changeState(t)
self.value = (self.value - 1) * self.AlphaExp + 1
            else:
                self.value *= self.BetaExp
    def reset(self):
        '''
        Resets the state value and the end-of-pulse time to their initial values.
        '''
        self.value = float(0)
self.endOfPulse_ms = self.PulseDur_ms
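# A minimal illustrative self-check (not part of the original module): the
# per-step update in computeStateValue*, multiplying by a cached exp(-beta*dt)
# each time step, reproduces the closed-form compValOn() exactly, which is the
# identity the AlphaExp/BetaExp caching above relies on. The dt, beta and v0
# values below are arbitrary demo numbers, not real channel parameters.
if __name__ == '__main__':
    _dt, _beta, _v0 = 0.05, 0.8, 1.0
    _beta_exp = math.exp(-_beta * _dt)
    _v = _v0
    for _ in range(100):
        _v *= _beta_exp
    assert abs(_v - compValOn(_v0, None, _beta, 100 * _dt, 0.0)) < 1e-12
    print('pulse-state step update matches the closed form')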
|
rnwatanabe/projectPR
|
PulseConductanceState.py
|
Python
|
gpl-3.0
| 7,133
|
[
"NEURON"
] |
18dbcf6b26f29d38ef4ebedc082d1aa521dc6da6cbfa4a3347572ac9508a7f67
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from .ref_graphs.resnets import small_cf_resnet
from .ref_graphs.lenets import lenet
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('graph', [lenet, small_cf_resnet])
def test_no_grad(seed, graph):
from .graph_converter_test_utils import structure_tester, value_tester
nn.clear_parameters()
# Random number
np.random.seed(seed)
rng = np.random.RandomState(seed)
# Graph
x_data = rng.randn(4, 3, 32, 32)
x0 = nn.Variable.from_numpy_array(x_data)\
.apply(need_grad=False) \
.apply(persistent=True)
y0 = graph(x0)
y1 = y0.no_grad()
# Test
    def assert_need_grad_false(f):
        for inp in f.inputs:
            assert inp.need_grad == False, "need_grad must be false"
        for out in f.outputs:
            assert out.need_grad == False, "need_grad must be false"
    y1.visit(assert_need_grad_false)
structure_tester(y0, y1)
value_tester(y0, y1, clear_no_need_grad=True)
|
sony/nnabla
|
python/test/utils/test_graph_converters/test_no_grad.py
|
Python
|
apache-2.0
| 1,701
|
[
"VisIt"
] |
0e8ac53d6ac2112d230dc92b80b42840814f1269724cb7b283c5c1593f55a066
|
__author__ = 'jan'
import matplotlib.pyplot as plt
# mpath/mpatches are needed by the patch_artist code paths below
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from prettyplotlib.utils import remove_chartjunk, maybe_get_ax
from prettyplotlib import colors as _colors
import numpy as np
import matplotlib.mlab as mlab
def _beeswarm(ax, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None):
"""
Call signature::
beeswarm(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None, patch_artist=False)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Function Arguments:
*x* :
Array or a sequence of vectors.
*notch* : [ 0 (default) | 1]
If 0, produce a rectangular box plot.
If 1, produce a notched box plot
*sym* :
(default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
*vert* : [1 (default) | 0]
If 1, make the boxes vertical.
If 0, make horizontal boxes. (Odd, but kept for compatibility
with MATLAB boxplots)
*whis* : (default 1.5)
Defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*bootstrap* : [ *None* (default) | integer ]
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If *None*, no
bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation
(see McGill, R., Tukey, J.W., and Larsen, W.A.,
1978, and Kendall and Stuart, 1967). Otherwise, bootstrap
specifies the number of times to bootstrap the median to
determine its 95% confidence intervals. Values between 1000
and 10000 are recommended.
*positions* : (default 1,2,...,n)
Sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* : [ scalar | array ]
Either a scalar or a vector to set the width of each box.
The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*patch_artist* : boolean
If *False* (default), produce boxes with the
:class:`~matplotlib.lines.Line2D` artist.
If *True*, produce boxes with the
:class:`~matplotlib.patches.Patch` artist.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`~matplotlib.lines.Line2D`
instances created (unless *patch_artist* was *True*. See above.).
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not ax._hold: ax.cla()
holdStatus = ax._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
ax.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
# get fliers - if we are showing them
flier = []
flier_x = []
        if len(sym) != 0:
            flier = d
            # spread the flier points evenly across the box width
            flier_x = np.linspace(box_x_min, box_x_max, flier.shape[0])
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
def bootstrapMedian(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentile = [2.5,97.5]
estimate = np.zeros(N)
for n in range(N):
bsIndex = np.random.random_integers(0,M-1,M)
bsData = data[bsIndex]
estimate[n] = mlab.prctile(bsData, 50)
CI = mlab.prctile(estimate, percentile)
return CI
# get conf. intervals around median
CI = bootstrapMedian(d, N=bootstrap)
notch_max = CI[1]
notch_min = CI[0]
else:
# Estimate notch locations using Gaussian-based
# asymptotic approximation.
#
# For discussion: McGill, R., Tukey, J.W.,
# and Larsen, W.A. (1978) "Variations of
# Boxplots", The American Statistician, 32:12-16.
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
def to_vc(xs,ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi,yi in zip(xs,ys):
verts.append( (xi,yi) )
verts.append( (0,0) ) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO]*(len(verts)-2) + \
[mpath.Path.CLOSEPOLY]
return verts,codes
def patch_list(xs,ys):
verts,codes = to_vc(xs,ys)
path = mpath.Path( verts, codes )
patch = mpatches.PathPatch(path)
ax.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return ax.plot(*args)
def dopatch(xs,ys):
return patch_list(xs,ys)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return ax.plot(*shuffled)
def dopatch(xs,ys):
xs,ys = ys,xs # flip X, Y
return patch_list(xs,ys)
if patch_artist:
median_color = 'k'
else:
median_color = 'r'
#whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
# wisk_x, [q3, wisk_hi], 'b--'))
#caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
# cap_x, [wisk_lo, wisk_lo], 'k-'))
#if patch_artist:
# boxes.extend(dopatch(box_x, box_y))
#else:
# boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, median_color+'-'))
fliers.extend(doplot(flier_x, flier, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = ax.set_xticks, ax.set_xlim
else:
setticks, setlim = ax.set_yticks, ax.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
ax.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
def beeswarm(*args, **kwargs):
"""
    Create an R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is
shown, and ticklabels labeling each category of data can be added.
@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:
*median_color* : (default gray)
The color of median lines
*median_width* : (default 2)
Median line width
*colors* : (default None)
Colors to use when painting a dataseries, for example
list1 = [1,2,3]
list2 = [5,6,7]
ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"])
@return:
"""
ax, args, kwargs = maybe_get_ax(*args, **kwargs)
# If no ticklabels are specified, don't draw any
xticklabels = kwargs.pop('xticklabels', None)
colors = kwargs.pop('colors', None)
fontsize = kwargs.pop('fontsize', 10)
gray = _colors.set1[8]
red = _colors.set1[0]
blue = kwargs.pop('color', _colors.set1[1])
kwargs.setdefault('widths', 0.25)
kwargs.setdefault('sym', "o")
    # pop the prettyplotlib-specific options before they reach _beeswarm,
    # which does not accept them
    median_color = kwargs.pop("median_color", gray)
    median_linewidth = kwargs.pop("median_linewidth", 2)
    bp = _beeswarm(ax, *args, **kwargs)
if xticklabels:
ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)
show_caps = kwargs.pop('show_caps', True)
show_ticks = kwargs.pop('show_ticks', False)
remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
linewidth = 0.75
plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
    plt.setp(bp['medians'], color=median_color, linewidth=median_linewidth)
#plt.setp(bp['whiskers'], color=blue, linestyle='solid',
# linewidth=linewidth)
    if colors is not None:
        for color, flier in zip(colors, bp['fliers']):
            plt.setp(flier, color=color)
#if show_caps:
# plt.setp(bp['caps'], color=blue, linewidth=linewidth)
#else:
# plt.setp(bp['caps'], color='none')
ax.spines['left']._linewidth = 0.5
return bp
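# Example usage (illustrative), mirroring the docstring above:
#   ppl.beeswarm([[1, 2, 3], [5, 6, 7]], colors=["red", "blue"],
#                xticklabels=["data1", "data2"])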
|
olgabot/prettyplotlib
|
prettyplotlib/_beeswarm.py
|
Python
|
mit
| 12,196
|
[
"Gaussian"
] |
862ee361544df23a32f1a8400418e201f9b358826f541a542dd0c5863fee83e3
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, unicode_literals
from operator import attrgetter
import os
import shutil
import sys
import traceback
from doit.loader import generate_tasks
from doit.cmd_base import TaskLoader
from doit.reporter import ExecutedOnlyReporter
from doit.doit_cmd import DoitMain
from doit.cmd_help import Help as DoitHelp
from doit.cmd_run import Run as DoitRun
from doit.cmd_clean import Clean as DoitClean
from logbook import NullHandler
from . import __version__
from .nikola import Nikola
from .utils import _reload, sys_decode, LOGGER, STRICT_HANDLER
config = {}
def main(args):
quiet = False
if len(args) > 0 and args[0] == 'build' and '--strict' in args:
LOGGER.notice('Running in strict mode')
STRICT_HANDLER.push_application()
    # parenthesize: 'and' binds tighter than 'or', so without the parentheses
    # a bare --quiet anywhere would enable quiet mode even outside 'build'
    if len(args) > 0 and args[0] == 'build' and ('-q' in args or '--quiet' in args):
nullhandler = NullHandler()
nullhandler.push_application()
quiet = True
global config
sys.path.append('')
try:
import conf
_reload(conf)
config = conf.__dict__
except Exception:
if os.path.exists('conf.py'):
msg = traceback.format_exc(0).splitlines()[1]
LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))
sys.exit(1)
config = {}
site = Nikola(**config)
return DoitNikola(site, quiet).run(args)
class Help(DoitHelp):
"""show Nikola usage instead of doit """
@staticmethod
def print_usage(cmds):
"""print nikola "usage" (basic help) instructions"""
print("Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\n\n")
print("Available commands:")
for cmd in sorted(cmds.values(), key=attrgetter('name')):
print(" nikola %-*s %s" % (20, cmd.name, cmd.doc_purpose))
print("")
print(" nikola help show help / reference")
print(" nikola help <command> show command usage")
print(" nikola help <task-name> show task usage")
class Build(DoitRun):
"""expose "run" command as "build" for backward compatibility"""
def __init__(self, *args, **kw):
opts = list(self.cmd_options)
opts.append(
{
'name': 'strict',
'long': 'strict',
'default': False,
'type': bool,
'help': "Fail on things that would normally be warnings.",
}
)
opts.append(
{
'name': 'quiet',
'long': 'quiet',
'short': 'q',
'default': False,
'type': bool,
'help': "Run quietly.",
}
)
self.cmd_options = tuple(opts)
super(Build, self).__init__(*args, **kw)
class Clean(DoitClean):
"""A clean that removes cache/"""
def clean_tasks(self, tasks, dryrun):
if not dryrun and config:
cache_folder = config.get('CACHE_FOLDER', 'cache')
if os.path.exists(cache_folder):
shutil.rmtree(cache_folder)
return super(Clean, self).clean_tasks(tasks, dryrun)
class NikolaTaskLoader(TaskLoader):
"""custom task loader to get tasks from Nikola instead of dodo.py file"""
def __init__(self, nikola, quiet=False):
self.nikola = nikola
self.quiet = quiet
def load_tasks(self, cmd, opt_values, pos_args):
if self.quiet:
DOIT_CONFIG = {
'verbosity': 0,
'reporter': 'zero',
}
else:
DOIT_CONFIG = {
'reporter': ExecutedOnlyReporter,
}
DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']
tasks = generate_tasks(
'render_site',
self.nikola.gen_tasks('render_site', "Task", 'Group of tasks to render the site.'))
latetasks = generate_tasks(
'post_render',
            self.nikola.gen_tasks('post_render', "LateTask", 'Group of tasks to be executed after the site is rendered.'))
return tasks + latetasks, DOIT_CONFIG
class DoitNikola(DoitMain):
    # overwrite help command
DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]
TASK_LOADER = NikolaTaskLoader
def __init__(self, nikola, quiet=False):
self.nikola = nikola
self.task_loader = self.TASK_LOADER(nikola, quiet)
def get_commands(self):
# core doit commands
cmds = DoitMain.get_commands(self)
# load nikola commands
for name, cmd in self.nikola.commands.items():
cmds[name] = cmd
return cmds
def run(self, cmd_args):
sub_cmds = self.get_commands()
args = self.process_args(cmd_args)
args = [sys_decode(arg) for arg in args]
if len(args) == 0 or any(arg in ["--help", '-h'] for arg in args):
cmd_args = ['help']
args = ['help']
# Hide run because Nikola uses build
sub_cmds.pop('run')
if len(args) == 0 or args[0] not in sub_cmds.keys() or \
args[0] == 'build':
# Check for conf.py before launching run
if not self.nikola.configured:
LOGGER.error("This command needs to run inside an "
"existing Nikola site.")
return False
return super(DoitNikola, self).run(cmd_args)
@staticmethod
def print_version():
print("Nikola version " + __version__)
|
Proteus-tech/nikola
|
nikola/main.py
|
Python
|
mit
| 6,777
|
[
"VisIt"
] |
0f2adfc2d7934aca3468165e7380152faa4fc2876c5bfffa06048bf1cbe3b625
|
## Created 2012 by Scott Harden, AJ4VD
## Updated October 19, 2014 by Andrew Milluzzi, KK4LWR
## Edits:
## - Updated logging date
## Instructions: update the start time of the contest; see the note around line 130.
import sqlite3
import time
import pylab
import numpy
import datetime
def toListie(t):
for i in range(len(t)):
t[i]=t[i][0]
return t
class dbStat():
def __init__(self):
self.con=sqlite3.connect('scr.db')
self.c=self.con.cursor()
self.table='qsos'
def getOpData(self):
ops=toListie(self.runQuery("SELECT DISTINCT operator FROM qsos"))
opData={}
for op in ops:
timePoints=toListie(self.runQuery("SELECT stamp FROM qsos WHERE operator='%s'"%op))
for i in range(len(timePoints)):
timePoints[i]=time.mktime(time.strptime(timePoints[i], "%Y-%m-%d %H:%M:%S"))
#print op,len(timePoints),timePoints[1]
opData[op]=timePoints
#print op,"had",len(timePoints),"contacts"
return opData
def runQuery(self,query,commit=False):
if not "SELECT" in query:
f=open('log.txt','a')
f.write("%.02f %s\n"%(time.time(),query.replace("\n"," ")))
f.close()
print "RUNNING QUERY:"
print query
query=query.upper()
data=list(self.c.execute(query).fetchall())
if commit==True: self.commit()
return data
def disconnect(self):
self.con.close()
def kernGauss(size):
size=float(size)
gaussian = lambda x: numpy.exp(-x**2/size)
g = gaussian(numpy.arange(-size/2,size/2))
return g / g.sum()
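# Sanity check (illustrative addition): the kernel has unit area, which is
# what lets the convolution below be rescaled to a QSOs-per-hour rate.
assert abs(kernGauss(120).sum() - 1.0) < 1e-9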
d=dbStat()
opData=d.getOpData()
##############################
##############################
##############################
opNames=[]
opHours=[]
opRates=[]
opTotls=[]
totalHours=0
for op in opData.keys():
ssis=[]
for i in range(len(opData[op])-1):
ssi=(opData[op][i+1]-opData[op][i])/60.0
if ssi>15:continue
ssis.append(ssi)
    if not ssis:
        # skip operators with no measurable on-air time to avoid a divide by zero
        continue
    opNames.append(op)
opHours.append(sum(ssis)/60.0)
totalHours += sum(ssis)/60.0
opRates.append(len(ssis)*60.0/sum(ssis))
opTotls.append(len(opData[op]))
print "%s operated %.02f hr averaging %.02f QSOs/hr"%\
(op,sum(ssis)/60.0,len(ssis)*60.0/sum(ssis))
#print totalHours
pylab.figure()
pylab.title("Contacts Made")
pylab.ylabel("total number of contacts")
pylab.xlabel("Operator")
pylab.bar(numpy.arange(len(opNames))-.4,opTotls,fc='g')
pylab.xticks(numpy.arange(len(opNames)),opNames)
pylab.savefig('./pages/totals.png')
pylab.clf()
#raise SystemExit(1)
pylab.figure()
pylab.title("QSO Rate")
pylab.ylabel("average contacts per hour")
pylab.xlabel("Operator")
pylab.bar(numpy.arange(len(opNames))-.4,opRates,fc='r')
pylab.xticks(numpy.arange(len(opNames)),opNames)
pylab.savefig('./pages/rate.png')
pylab.clf()
pylab.figure()
hoursLabel = "Operating Time - Total Hours: %.02f" % totalHours
pylab.title(hoursLabel)
pylab.ylabel("Cumulative Hours")
pylab.xlabel("Operator")
pylab.bar(numpy.arange(len(opNames))-.4,opHours,fc='b')
pylab.xticks(numpy.arange(len(opNames)),opNames)
pylab.savefig('./pages/optime.png')
pylab.clf()
#kernel=[1.0/3600]*3600
kernel=kernGauss(120)
dySec=60*60*24
divider=15
# UPDATE THIS LINE BY TIME OF NEW EVENT
# epoch seconds of first day of event
startEpoch = time.mktime(time.strptime("10/20/2014", "%m/%d/%Y"))
startEpoch=startEpoch+(60*60*8)
contestLen=5.5*(60*60*24) #5.5 days
endEpoch=startEpoch+contestLen
timePoints=numpy.arange(startEpoch,endEpoch,divider)
for op in opData.keys():
hist, bin_edges = numpy.histogram([opData[op]], bins=timePoints)
print "processing data for",op
spikeTimes=opData[op]
spikeTimePoints=numpy.digitize(spikeTimes,timePoints)
digitalValues=numpy.zeros(len(timePoints))
digitalValues[spikeTimePoints]=1
smooth=numpy.convolve(digitalValues,kernel)*60*60/divider
newTimes=numpy.arange(startEpoch,endEpoch+divider*len(smooth),divider)
#newTimes=newTimes+60*60*4
newTimes=map(datetime.datetime.fromtimestamp, newTimes)
#pylab.plot(newTimes,smooth,label=op)
opData[op]=smooth
for day in range(5):
print "plotting day",day+1
#figs.append(pylab.figure())
fig=pylab.figure(figsize=(10,5))
i1=(4*60*60+dySec*(day))/divider
i2=i1+dySec/divider
#cut it in half to only show 12 hours
#i1=i1+dySec/2/divider
dayTimes=newTimes[i1:i2]
for op in opData.keys():
daySmooth=opData[op][i1:i2]
if len(dayTimes)!=len(daySmooth): continue
pylab.plot(dayTimes,daySmooth,label=op,lw=2)
pylab.ylabel("Contact Rate (QSOs per hour)")
pylab.title("Day %d"%(day+1))
pylab.grid()
fig.autofmt_xdate()
pylab.legend(loc=1)
pylab.axis([None,None,None,120])
pylab.savefig("./pages/day"+str(day)+".png")
#pylab.close()
pylab.clf()
print "plotting the week"
fig=pylab.figure(figsize=(10,5))
for op in opData.keys():
#print len(opData[op])
#print len(newTimes)
if len(opData[op]) > len(newTimes): continue
pylab.plot(newTimes[0:len(opData[op])],opData[op],label=op)
pylab.ylabel("Contact Rate (QSOs per hour)")
pylab.title("All Week")
pylab.grid()
fig.autofmt_xdate()
pylab.legend(loc=1)
pylab.axis([None,None,None,120])
pylab.savefig("./pages/dayAll.png")
#pylab.close()
pylab.clf()
print "DONE"
|
swharden/pyHamLog
|
genstats.py
|
Python
|
gpl-2.0
| 5,380
|
[
"Gaussian"
] |
75bbf819150b903c4f12e985d1f0b7b1cc602b06aafaabce268c2921a565391b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (@bcoca)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(ansible.windows.win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
- Use a negative age to find files equal to or less than the specified time.
- You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
type: str
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
- The pattern is matched against the file base name, excluding the directory.
- When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
if you are looking to match all files ending in .default, you'd need to use '.*\.default'
as a regexp and not just '\.default'.
- This parameter expects a list, which can be either comma separated or YAML. If any of the
patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
in undesirable ways.
type: list
aliases: [ pattern ]
elements: str
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- Items whose basenames match an C(excludes) pattern are culled from C(patterns) matches.
Multiple patterns can be specified using a list.
type: list
aliases: [ exclude ]
version_added: "2.5"
elements: str
contains:
description:
- A regular expression or pattern which should be matched against the file content.
type: str
read_whole_file:
description:
- When doing a C(contains) search, determines whether the whole file should be read into
memory or if the regex should be applied to the file line-by-line.
- Setting this to C(true) can have performance and memory implications for large files.
- This uses C(re.search()) instead of C(re.match()).
type: bool
default: false
version_added: "2.11"
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
type: list
required: true
aliases: [ name, path ]
elements: str
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in Ansible 2.3.
type: str
choices: [ any, directory, file, link ]
default: file
recurse:
description:
- If target is a directory, recursively descend into the directory looking for files.
type: bool
default: no
size:
description:
- Select files whose size is equal to or greater than the specified size.
- Use a negative size to find files equal to or less than the specified size.
- Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
- Size is not evaluated for directories.
type: str
age_stamp:
description:
- Choose the file property against which we compare age.
type: str
choices: [ atime, ctime, mtime ]
default: mtime
hidden:
description:
- Set this to C(yes) to include hidden files, otherwise they will be ignored.
type: bool
default: no
follow:
description:
- Set this to C(yes) to follow symlinks in path for systems with python 2.6+.
type: bool
default: no
get_checksum:
description:
- Set this to C(yes) to retrieve a file's SHA1 checksum.
type: bool
default: no
use_regex:
description:
- If C(no), the patterns are file globs (shell).
- If C(yes), they are python regexes.
type: bool
default: no
depth:
description:
- Set the maximum number of levels to descend into.
- Setting recurse to C(no) will override this value, which is effectively depth 1.
- Default is unlimited depth.
type: int
version_added: "2.6"
seealso:
- module: ansible.windows.win_find
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
- name: Use a single pattern that contains a comma formatted as a list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns: ['^_[0-9]{2,4}_.*.log$']
- name: Use multiple patterns that contain a comma formatted as a YAML list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns:
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
'''
RETURN = r'''
files:
description: All matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: Number of matches
returned: success
type: int
sample: 14
examined:
description: Number of filesystem objects looked at
returned: success
type: int
sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
'''filter using glob patterns'''
if not patterns and not excludes:
return True
if use_regex:
if patterns and not excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
return True
elif patterns and excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
for e in excludes:
r = re.compile(e)
if r.match(f):
return False
return True
else:
if patterns and not excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
elif patterns and excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
for e in excludes:
if fnmatch.fnmatch(f, e):
return False
return True
return False
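# Examples (illustrative): with shell globs,
#   pfilter('error.log', patterns=['*.log'])                      -> True
#   pfilter('error.log', patterns=['*.log'], excludes=['error*']) -> False
# With use_regex=True, re.match() anchors at the start of the basename, so
# patterns=[r'.*\.log'] plays the role of the glob '*.log'.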
def agefilter(st, now, age, timestamp):
'''filter files older than age'''
if age is None:
return True
    elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
        return True
    elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
return True
return False
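# Example (illustrative): with age parsed from "2d" (172800 seconds), a file
# whose selected timestamp is three days old passes ("at least 2 days old");
# the negative form "-2d" instead selects files touched within the last 2 days.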
def sizefilter(st, size):
'''filter files greater than size'''
if size is None:
return True
elif size >= 0 and st.st_size >= abs(size):
return True
elif size < 0 and st.st_size <= abs(size):
return True
return False
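# Example (illustrative): a stat result with st_size == 2 * 1024**2 passes
# sizefilter(st, 1024**2) ("at least 1m") and sizefilter(st, -4 * 1024**2)
# ("at most 4m"); main() below builds these byte counts from strings like "1m".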
def contentfilter(fsname, pattern, read_whole_file=False):
"""
Filter files which contain the given expression
:arg fsname: Filename to scan for lines matching a pattern
:arg pattern: Pattern to look for inside of line
:arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line.
:rtype: bool
:returns: True if one of the lines in fsname matches the pattern. Otherwise False
"""
if pattern is None:
return True
prog = re.compile(pattern)
try:
with open(fsname) as f:
if read_whole_file:
return bool(prog.search(f.read()))
for line in f:
if prog.match(line):
return True
except Exception:
pass
return False
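# Example (illustrative): the default line-by-line mode uses re.match(), which
# anchors at the start of each line, so contentfilter(path, r'127\.') matches a
# hosts-file line "127.0.0.1 localhost" while r'localhost' on that line is only
# found with read_whole_file=True (which switches to re.search()).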
def statinfo(st):
pw_name = ""
gr_name = ""
try: # user data
pw_name = pwd.getpwuid(st.st_uid).pw_name
except Exception:
pass
try: # group data
gr_name = grp.getgrgid(st.st_gid).gr_name
except Exception:
pass
return {
'mode': "%04o" % stat.S_IMODE(st.st_mode),
'isdir': stat.S_ISDIR(st.st_mode),
'ischr': stat.S_ISCHR(st.st_mode),
'isblk': stat.S_ISBLK(st.st_mode),
'isreg': stat.S_ISREG(st.st_mode),
'isfifo': stat.S_ISFIFO(st.st_mode),
'islnk': stat.S_ISLNK(st.st_mode),
'issock': stat.S_ISSOCK(st.st_mode),
'uid': st.st_uid,
'gid': st.st_gid,
'size': st.st_size,
'inode': st.st_ino,
'dev': st.st_dev,
'nlink': st.st_nlink,
'atime': st.st_atime,
'mtime': st.st_mtime,
'ctime': st.st_ctime,
'gr_name': gr_name,
'pw_name': pw_name,
'wusr': bool(st.st_mode & stat.S_IWUSR),
'rusr': bool(st.st_mode & stat.S_IRUSR),
'xusr': bool(st.st_mode & stat.S_IXUSR),
'wgrp': bool(st.st_mode & stat.S_IWGRP),
'rgrp': bool(st.st_mode & stat.S_IRGRP),
'xgrp': bool(st.st_mode & stat.S_IXGRP),
'woth': bool(st.st_mode & stat.S_IWOTH),
'roth': bool(st.st_mode & stat.S_IROTH),
'xoth': bool(st.st_mode & stat.S_IXOTH),
'isuid': bool(st.st_mode & stat.S_ISUID),
'isgid': bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec=dict(
paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
patterns=dict(type='list', default=['*'], aliases=['pattern'], elements='str'),
excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
read_whole_file=dict(type='bool', default=False),
file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
age=dict(type='str'),
age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
size=dict(type='str'),
recurse=dict(type='bool', default=False),
hidden=dict(type='bool', default=False),
follow=dict(type='bool', default=False),
get_checksum=dict(type='bool', default=False),
use_regex=dict(type='bool', default=False),
depth=dict(type='int'),
),
supports_check_mode=True,
)
params = module.params
filelist = []
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
now = time.time()
msg = ''
looked = 0
for npath in params['paths']:
npath = os.path.expanduser(os.path.expandvars(npath))
if os.path.isdir(npath):
for root, dirs, files in os.walk(npath, followlinks=params['follow']):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
if params['depth']:
wpath = npath.rstrip(os.path.sep) + os.path.sep
depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
if depth > params['depth']:
continue
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
try:
st = os.lstat(fsname)
except Exception:
msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
continue
r = {'path': fsname}
if params['file_type'] == 'any':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
if stat.S_ISREG(st.st_mode) and params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and contentfilter(fsname, params['contains'], params['read_whole_file']):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
if not params['recurse']:
break
else:
msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
if __name__ == '__main__':
main()
|
jtyr/ansible
|
lib/ansible/modules/find.py
|
Python
|
gpl-3.0
| 17,142
|
[
"Brian"
] |
dd47a61682a6074cfb10dda6586442a968f349bbcf5349f6baf984c62e667cb0
|
# -*- coding: utf-8 -*-
import numpy as np
from shapely.geometry.polygon import Polygon
import datetime
import netCDF4 as nc
import itertools
import geojson
from shapely.ops import cascaded_union
#from openclimategis.util.helpers import get_temp_path
#from openclimategis.util.toshp import OpenClimateShp
from shapely.geometry.multipolygon import MultiPolygon, MultiPolygonAdapter
from shapely import prepared, wkt
from shapely.geometry.geo import asShape
import time
dtime = 0
class OcgDataset(object):
"""
    Wraps a netCDF4-python Dataset object, providing extraction methods for
    spatial and temporal queries.
dataset -- netCDF4-python Dataset object
**kwds -- arguments for the names of spatial and temporal dimensions.
rowbnds_name
colbnds_name
time_name
time_units
calendar
"""
def __init__(self,dataset,**kwds):
self.dataset = dataset
# self.polygon = kwds.get('polygon')
# self.temporal = kwds.get('temporal')
# self.row_name = kwds.get('row_name') or 'latitude'
# self.col_name = kwds.get('col_name') or 'longitude'
## extract the names of the spatiotemporal variables/dimensions from
## the keyword arguments.
self.rowbnds_name = kwds.get('rowbnds_name') or 'bounds_latitude'
self.colbnds_name = kwds.get('colbnds_name') or 'bounds_longitude'
self.time_name = kwds.get('time_name') or 'time'
self.time_units = kwds.get('time_units') or 'days since 1950-01-01 00:00:00'
self.calendar = kwds.get('calendar') or 'proleptic_gregorian'
self.level_name = kwds.get('level_name') or 'levels'
# self.clip = kwds.get('clip') or False
# self.dissolve = kwds.get('dissolve') or False
# self.row = self.dataset.variables[self.row_name][:]
# self.col = self.dataset.variables[self.col_name][:]
## extract the row and column bounds from the dataset
self.row_bnds = self.dataset.variables[self.rowbnds_name][:]
self.col_bnds = self.dataset.variables[self.colbnds_name][:]
## convert the time vector to datetime objects
self.timevec = nc.netcdftime.num2date(self.dataset.variables[self.time_name][:],
self.time_units,
self.calendar)
## these are base numpy arrays used by spatial operations.
## four numpy arrays one for each bounding coordinate of a polygon
self.min_col,self.min_row = np.meshgrid(self.col_bnds[:,0],self.row_bnds[:,0])
self.max_col,self.max_row = np.meshgrid(self.col_bnds[:,1],self.row_bnds[:,1])
## these are the original indices of the row and columns. they are
## referenced after the spatial subset to retrieve data from the dataset
self.real_col,self.real_row = np.meshgrid(np.arange(0,len(self.col_bnds)),
np.arange(0,len(self.row_bnds)))
def _itr_array_(self,a):
"a -- 2-d ndarray"
ix = a.shape[0]
jx = a.shape[1]
for ii,jj in itertools.product(xrange(ix),xrange(jx)):
yield ii,jj
def _contains_(self,grid,lower,upper):
s1 = grid > lower
s2 = grid < upper
return(s1*s2)
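    # Example (illustrative): for grid = np.array([[0.5, 1.5], [2.5, 3.5]]),
    # _contains_(grid, 1.0, 3.0) gives [[False, True], [True, False]]; these
    # elementwise masks drive the envelope pre-subset in _set_overlay_ below.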
def _set_overlay_(self,polygon=None,clip=False):
"""
Perform spatial operations.
polygon=None -- shapely polygon object
clip=False -- set to True to perform an intersection
"""
print('overlay...')
## holds polygon objects
self._igrid = np.empty(self.min_row.shape,dtype=object)
## holds weights for area weighting in the case of a dissolve
self._weights = np.zeros(self.min_row.shape)
## initial subsetting to avoid iterating over all polygons unless abso-
## lutely necessary
if polygon is not None:
emin_col,emin_row,emax_col,emax_row = polygon.envelope.bounds
smin_col = self._contains_(self.min_col,emin_col,emax_col)
smax_col = self._contains_(self.max_col,emin_col,emax_col)
smin_row = self._contains_(self.min_row,emin_row,emax_row)
smax_row = self._contains_(self.max_row,emin_row,emax_row)
include = np.any((smin_col,smax_col),axis=0)*np.any((smin_row,smax_row),axis=0)
else:
include = np.empty(self.min_row.shape,dtype=bool)
include[:,:] = True
# print('constructing grid...')
# ## construct the subset of polygon geometries
# vfunc = np.vectorize(self._make_poly_array_)
# self._igrid = vfunc(include,
# self.min_row,
# self.min_col,
# self.max_row,
# self.max_col,
# polygon)
#
# ## calculate the areas for potential weighting
# print('calculating area...')
# def _area(x):
# if x != None:
# return(x.area)
# else:
# return(0.0)
# vfunc_area = np.vectorize(_area,otypes=[np.float])
# preareas = vfunc_area(self._igrid)
#
# ## if we are clipping the data, modify the geometries and record the weights
# if clip and polygon:
# print('clipping...')
## polys = []
## for p in self._igrid.reshape(-1):
## polys.append(self._intersection_(polygon,p))
# vfunc = np.vectorize(self._intersection_)
# self._igrid = vfunc(polygon,self._igrid)
#
# ## calculate weights following intersection
# areas = vfunc_area(self._igrid)
# def _weight(x,y):
# if y == 0:
# return(0.0)
# else:
# return(x/y)
# self._weights=np.vectorize(_weight)(areas,preareas)
#
# ## set the mask
# self._mask = self._weights > 0
#
# print('overlay done.')
## loop for each spatial grid element
if polygon:
# prepared_polygon = polygon
prepared_polygon = prepared.prep(polygon)
for ii,jj in self._itr_array_(include):
if not include[ii,jj]: continue
## create the polygon
g = self._make_poly_((self.min_row[ii,jj],self.max_row[ii,jj]),
(self.min_col[ii,jj],self.max_col[ii,jj]))
## add the polygon if it intersects the aoi of if all data is being
## returned.
if polygon:
if not prepared_polygon.intersects(g): continue
# if g.intersects(polygon) or polygon is None:
## get the area before the intersection
prearea = g.area
## full intersection in the case of a clip and an aoi is passed
# if g.overlaps(polygon) and clip is True and polygon is not None:
if clip is True and polygon is not None:
ng = g.intersection(polygon)
## otherwise, just keep the geometry
else:
ng = g
## calculate the weight
w = ng.area/prearea
                ## a polygon can intersect without actually overlapping,
                ## i.e. when it only shares a border.
if w > 0:
self._igrid[ii,jj] = ng
self._weights[ii,jj] = w
## the mask is used as a subset
self._mask = self._weights > 0
# self._weights = self._weights/self._weights.sum()
def _make_poly_(self,rtup,ctup):
"""
rtup = (row min, row max)
ctup = (col min, col max)
"""
return Polygon(((ctup[0],rtup[0]),
(ctup[0],rtup[1]),
(ctup[1],rtup[1]),
(ctup[1],rtup[0])))
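    # Example (illustrative): _make_poly_((40.0, 41.0), (-105.0, -104.0))
    # builds the one-degree grid cell with corners (-105, 40), (-105, 41),
    # (-104, 41) and (-104, 40).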
@staticmethod
def _make_poly_array_(include,min_row,min_col,max_row,max_col,polygon=None):
ret = None
if include:
poly = Polygon(((min_col,min_row),
(max_col,min_row),
(max_col,max_row),
(min_col,max_row),
(min_col,min_row)))
            if polygon is not None:
if polygon.intersects(poly):
ret = poly
else:
ret = poly
return(ret)
@staticmethod
def _intersection_(polygon,target):
ret = None
        if target is not None:
ppp = target.intersection(polygon)
if not ppp.is_empty:
ret = ppp
return(ret)
def _get_numpy_data_(self,var_name,polygon=None,time_range=None,clip=False,levels = [0]):
"""
var_name -- NC variable to extract from
polygon=None -- shapely polygon object
time_range=None -- [lower datetime, upper datetime]
clip=False -- set to True to perform a full intersection
"""
print('getting numpy data...')
## perform the spatial operations
self._set_overlay_(polygon=polygon,clip=clip)
def _u(arg):
"Pulls unique values and generates an evenly spaced array."
un = np.unique(arg)
return(np.arange(un.min(),un.max()+1))
def _sub(arg):
"Subset an array."
return arg[self._idxrow.min():self._idxrow.max()+1,
self._idxcol.min():self._idxcol.max()+1]
## get the time indices
if time_range is not None:
self._idxtime = np.arange(
0,
len(self.timevec))[(self.timevec>=time_range[0])*
(self.timevec<=time_range[1])]
else:
self._idxtime = np.arange(0,len(self.timevec))
## reference the original (world) coordinates of the netCDF when selecting
## the spatial subset.
self._idxrow = _u(self.real_row[self._mask])
self._idxcol = _u(self.real_col[self._mask])
## subset our reference arrays in a similar manner
self._mask = _sub(self._mask)
self._weights = _sub(self._weights)
self._igrid = _sub(self._igrid)
##check if data is 3 or 4 dimensions
dimShape = len(self.dataset.variables[var_name].dimensions)
## hit the dataset and extract the block
npd = None
narg = time.clock()
if dimShape == 3:
npd = self.dataset.variables[var_name][self._idxtime,self._idxrow,self._idxcol]
elif dimShape == 4:
npd = self.dataset.variables[var_name][self._idxtime,levels,self._idxrow,self._idxcol]
print "dtime: ", time.clock()-narg
## add in an extra dummy dimension in the case of one time layer
if len(npd.shape) == 2:
npd = npd.reshape(1,npd.shape[0],npd.shape[1])
print('numpy extraction done.')
return(npd)
def _is_masked_(self,arg):
"Ensures proper formating of masked data."
if isinstance(arg,np.ma.MaskedArray):
return None
else:
return arg
def extract_elements(self,*args,**kwds):
"""
Merges the geometries and extracted attributes into a GeoJson-like dictionary
list.
var_name -- NC variable to extract from
dissolve=False -- set to True to merge geometries and calculate an
area-weighted average
polygon=None -- shapely polygon object
time_range=None -- [lower datetime, upper datetime]
clip=False -- set to True to perform a full intersection
"""
print('extracting elements...')
## dissolve argument is unique to extract_elements
if 'dissolve' in kwds:
dissolve = kwds.pop('dissolve')
else:
dissolve = False
        if 'levels' in kwds:
            levels = kwds.get('levels')
        else:
            ## match the default of _get_numpy_data_
            levels = [0]
## extract numpy data from the nc file
npd = self._get_numpy_data_(*args,**kwds)
##check which flavor of climate data we are dealing with
ocgShape = len(npd.shape)
## will hold feature dictionaries
features = []
## the unique identified iterator
ids = self._itr_id_()
if dissolve:
## one feature is created for each unique time
for kk in range(len(self._idxtime)):
## check if this is the first iteration. approach assumes that
                ## masked values are homogeneous through the time layers. this
## avoids multiple union operations on the geometries. i.e.
## time 1 = masked, time 2 = masked, time 3 = masked
## vs.
## time 1 = 0.5, time 2 = masked, time 3 = 0.46
if kk == 0:
## on the first iteration:
## 1. make the unioned geometry
## 2. weight the data according to area
## reference layer for the masked data
lyr = None
if ocgShape==3:
lyr = npd[kk,:,:]
elif ocgShape==4:
lyr = npd[kk,0,:,:]
## select values with spatial overlap and not masked
if hasattr(lyr,'mask'):
select = self._mask*np.invert(lyr.mask)
else:
select = self._mask
## select those geometries
geoms = self._igrid[select]
## union the geometries
unioned = cascaded_union([p for p in geoms])
## select the weight subset and normalize to unity
sub_weights = self._weights*select
self._weights = sub_weights/sub_weights.sum()
## apply the weighting
weighted = npd*self._weights
## generate the feature
if ocgShape==3:
feature = dict(
id=ids.next(),
geometry=unioned,
properties=dict({VAR:float(weighted[kk,:,:].sum()),
'timestamp':self.timevec[self._idxtime[kk]]}))
elif ocgShape==4:
feature = dict(
id=ids.next(),
geometry=unioned,
properties=dict({VAR:list(float(weighted[kk,x,:,:].sum()) for x in xrange(len(levels))),
'timestamp':self.timevec[self._idxtime[kk]],
'levels':list(x for x in self.dataset.variables[self.level_name][levels])}))
features.append(feature)
else:
## loop for each feature. no dissolving.
for ii,jj in self._itr_array_(self._mask):
## if the data is included, add the feature
                if self._mask[ii,jj]:
## extract the data and convert any mask values
if ocgShape == 3:
data = [self._is_masked_(da) for da in npd[:,ii,jj]]
for kk in range(len(data)):
## do not add the feature if the value is a NoneType
                            if data[kk] is None: continue
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
properties=dict({VAR:float(data[kk]),
'timestamp':self.timevec[self._idxtime[kk]]}))
features.append(feature)
elif ocgShape == 4:
data = [self._is_masked_(da) for da in npd[:,:,ii,jj]]
for kk in range(len(data)):
## do not add the feature if the value is a NoneType
                            if data[kk] is None: continue
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
properties=dict({VAR:list(float(data[kk][x]) for x in xrange(len(levels))),
'timestamp':self.timevec[self._idxtime[kk]],
'levels':list(x for x in self.dataset.variables[self.level_name][levels])}))
features.append(feature)
print('extraction complete.')
return(features)
def _itr_id_(self,start=1):
        while True:
            yield start
            start += 1
def as_geojson(elements):
features = []
for e in elements:
e['properties']['timestamp'] = str(e['properties']['timestamp'])
features.append(geojson.Feature(**e))
fc = geojson.FeatureCollection(features)
return(geojson.dumps(fc))
def as_shp(elements,path=None):
if path is None:
path = get_temp_path(suffix='.shp')
ocs = OpenClimateShp(path,elements)
ocs.write()
return(path)
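## Illustrative sketch (not part of the original module): both exporters
## operate on the feature list returned by extract_elements. Note that
## as_geojson stringifies each feature's timestamp in place, so write the
## shapefile first if both outputs are needed. The path below is hypothetical.
##     out_path = as_shp(elements,path='/tmp/out.shp')
##     gj_text = as_geojson(elements)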
def multipolygon_operation(dataset,var,polygons,time_range=None,clip=None,dissolve=None,levels=None,ocgOpts=None):
    elements = []
    ## guard against the default of None being unpacked as keyword arguments
    if ocgOpts is None:
        ocgOpts = {}
    ncp = OcgDataset(dataset,**ocgOpts)
for ii,polygon in enumerate(polygons):
print(ii)
elements += ncp.extract_elements(var,
polygon=polygon,
time_range=time_range,
clip=clip,
dissolve=dissolve,
levels = levels)
print(repr(len(elements)))
return(elements)
if __name__ == '__main__':
narg = time.time()
## all
# POLYINT = Polygon(((-99,39),(-94,38),(-94,40),(-100,39)))
## great lakes
#POLYINT = Polygon(((-90.35,40.55),(-83,43),(-80.80,49.87),(-90.35,49.87)))
#POLYINT = Polygon(((-90,30),(-70,30),(-70,50),(-90,50)))
#POLYINT = Polygon(((-90,40),(-80,40),(-80,50),(-90,50)))
#POLYINT = Polygon(((-130,18),(-60,18),(-60,98),(-130,98)))
## return all data
#POLYINT = Polygon(((-124.75, 25.125), (-67.0, 25.125), (-67.0, 52.875), (-124.75, 52.875)))
POLYINT = None
## two areas
#POLYINT = [wkt.loads('POLYGON ((-85.324076923076916 44.028020242914977,-84.280765182186229 44.16008502024291,-84.003429149797569 43.301663967611333,-83.607234817813762 42.91867611336032,-84.227939271255053 42.060255060728736,-84.941089068825903 41.307485829959511,-85.931574898785414 41.624441295546553,-85.588206477732783 43.011121457489871,-85.324076923076916 44.028020242914977))'),
#wkt.loads('POLYGON ((-89.24640080971659 46.061817813765174,-88.942651821862341 46.378773279352224,-88.454012145748976 46.431599190283393,-87.952165991902831 46.11464372469635,-88.163469635627521 45.190190283400803,-88.889825910931165 44.503453441295541,-88.770967611336033 43.552587044534405,-88.942651821862341 42.786611336032379,-89.774659919028338 42.760198380566798,-90.038789473684204 43.777097165991897,-89.735040485829956 45.097744939271251,-89.24640080971659 46.061817813765174))')]
## watersheds
# path = '/home/bkoziol/git/OpenClimateGIS/bin/geojson/watersheds_4326.geojson'
## select = ['HURON']
# select = []
# with open(path,'r') as f:
# data = ''.join(f.readlines())
## data2 = f.read()
# gj = geojson.loads(data)
# POLYINT = []
# for feature in gj['features']:
# if select:
# prop = feature['properties']
# if prop['HUCNAME'] in select:
# pass
# else:
# continue
# geom = asShape(feature['geometry'])
# if not isinstance(geom,MultiPolygonAdapter):
# geom = [geom]
# for polygon in geom:
# POLYINT.append(polygon)
NC = '/home/reid/Desktop/ncconv/pcmdi.ipcc4.bccr_bcm2_0.1pctto2x.run1.monthly.cl_A1_1.nc'
#NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/maurer/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
#NC = '/home/reid/Desktop/ncconv/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
#NC = 'http://hydra.fsl.noaa.gov/thredds/dodsC/oc_gis_downscaling.bccr_bcm2.sresa1b.Prcp.Prcp.1.aggregation.1'
# TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,4,30)]
#TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,3,1)]
TEMPORAL = [datetime.datetime(1960,3,16),datetime.datetime(1961,3,16)] #time range for multi-level file
DISSOLVE = True
CLIP = True
VAR = 'cl'
#VAR = 'Prcp'
#kwds={}
kwds = {
'rowbnds_name': 'lat_bnds',
'colbnds_name': 'lon_bnds',
#'time_units': 'days since 1950-1-1 0:0:0.0',
'time_units': 'days since 1800-1-1 00:00:0.0',
'level_name': 'lev'
}
LEVELS = [x for x in range(0,10)]
## open the dataset for reading
dataset = nc.Dataset(NC,'r')
## make iterable if only a single polygon requested
if type(POLYINT) not in (list,tuple): POLYINT = [POLYINT]
## convenience function for multiple polygons
elements = multipolygon_operation(dataset,
VAR,
POLYINT,
time_range=TEMPORAL,
clip=CLIP,
dissolve=DISSOLVE,
levels = LEVELS,
ocgOpts=kwds
)
# out = as_shp(elements)
dtime = time.time()
out = as_geojson(elements)
with open('./out_NM','w') as f:
f.write(out)
dtime = time.time()-dtime
blarg = time.time()
    print('%s %s %s' % (blarg-narg,dtime,blarg-narg-dtime))
|
OpenSource-/OpenClimateGIS
|
src/openclimategis/util/ncconv/experimental/OLD_experimental/in_memory_oo_update.py
|
Python
|
bsd-3-clause
| 22,320
|
[
"NetCDF"
] |
ba13f904a59bdcb688ad7d0c3a6be17ec8bf5957052eff1237a36c2d1f7527fe
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The transformer encoder used by ELECTRA. Essentially BERT's with a few
additional functionalities added.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
class BertConfig(object):
"""Configuration for `BertModel` (ELECTRA uses the same model as BERT)."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
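# Illustrative sketch (not part of the original file): configurations
# round-trip through plain dictionaries, so the copy below carries the same
# hyperparameters. The vocab size of 30522 is just an example value.
#   config = BertConfig(vocab_size=30522)
#   same_config = BertConfig.from_dict(json.loads(config.to_json_string()))
#   assert config.to_dict() == same_config.to_dict()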
class BertModel(object):
"""BERT model. Although the training algorithm is different, the transformer
model for ELECTRA is the same as BERT's.
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
bert_config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=True,
scope=None,
embedding_size=None,
input_embeddings=None,
input_reprs=None,
update_embeddings=True,
untied_embeddings=False,
ltr=False,
rtl=False):
"""Constructor for BertModel.
Args:
bert_config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,
it is much faster if this is True, on the CPU or GPU, it is faster if
this is False.
scope: (optional) variable scope. Defaults to "electra".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
bert_config = copy.deepcopy(bert_config)
if not is_training:
bert_config.hidden_dropout_prob = 0.0
bert_config.attention_probs_dropout_prob = 0.0
    assert token_type_ids is not None
    input_shape = get_shape_list(token_type_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    if input_mask is None:
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if input_reprs is None:
if input_embeddings is None:
with tf.variable_scope(
(scope if untied_embeddings else "electra") + "/embeddings",
reuse=tf.AUTO_REUSE):
# Perform embedding lookup on the word ids
if embedding_size is None:
embedding_size = bert_config.hidden_size
(self.token_embeddings, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=bert_config.vocab_size,
embedding_size=embedding_size,
initializer_range=bert_config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
else:
self.token_embeddings = input_embeddings
with tf.variable_scope(
(scope if untied_embeddings else "electra") + "/embeddings",
reuse=tf.AUTO_REUSE):
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.token_embeddings,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=bert_config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=bert_config.initializer_range,
max_position_embeddings=bert_config.max_position_embeddings,
dropout_prob=bert_config.hidden_dropout_prob)
else:
self.embedding_output = input_reprs
if not update_embeddings:
self.embedding_output = tf.stop_gradient(self.embedding_output)
with tf.variable_scope(scope, default_name="electra"):
if self.embedding_output.shape[-1] != bert_config.hidden_size:
self.embedding_output = tf.layers.dense(
self.embedding_output, bert_config.hidden_size,
name="embeddings_project")
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
token_type_ids, input_mask)
# Add causal masking to the attention for running the transformer
# left-to-right or right-to-left
if ltr or rtl:
causal_mask = tf.ones((seq_length, seq_length))
if ltr:
causal_mask = tf.matrix_band_part(causal_mask, -1, 0)
else:
causal_mask = tf.matrix_band_part(causal_mask, 0, -1)
attention_mask *= tf.expand_dims(causal_mask, 0)
# Run the stacked transformer. Output shapes
# sequence_output: [batch_size, seq_length, hidden_size]
# pooled_output: [batch_size, hidden_size]
# all_encoder_layers: [n_layers, batch_size, seq_length, hidden_size].
# attn_maps: [n_layers, batch_size, n_heads, seq_length, seq_length]
(self.all_layer_outputs, self.attn_maps) = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=bert_config.hidden_size,
num_hidden_layers=bert_config.num_hidden_layers,
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
intermediate_act_fn=get_activation(bert_config.hidden_act),
hidden_dropout_prob=bert_config.hidden_dropout_prob,
attention_probs_dropout_prob=
bert_config.attention_probs_dropout_prob,
initializer_range=bert_config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_layer_outputs[-1]
self.pooled_output = self.sequence_output[:, 0]
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_layer_outputs
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.math.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
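# Illustrative sanity check (not part of the original file): GELU vanishes at
# zero and approaches the identity for large positive inputs, since erf
# saturates at 1. For example, gelu(tf.constant(0.0)) evaluates to 0.0 and
# gelu(tf.constant(10.0)) to ~10.0.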
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, prefix=""):
"""Compute the union of the current variables and checkpoint variables."""
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
initialized_variable_names = {}
assignment_map = collections.OrderedDict()
for x in tf.train.list_variables(init_checkpoint):
(name, var) = (x[0], x[1])
if prefix + name not in name_to_variable:
continue
assignment_map[name] = prefix + name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return assignment_map, initialized_variable_names
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return contrib_layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
for TPUs.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
original_dims = input_ids.shape.ndims
if original_dims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if original_dims == 3:
input_shape = get_shape_list(input_ids)
    # Flatten to 2-D so the matmul against the embedding table is valid.
    input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
output = tf.matmul(input_ids, embedding_table)
output = tf.reshape(output,
[input_shape[0], input_shape[1], embedding_size])
else:
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return output, embedding_table
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
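# Illustrative sketch (not part of the original file): with a single padded
# position, every query row repeats the key-side mask.
#   to_mask = tf.constant([[1, 1, 0]])  # shape [1, 3]
#   mask = create_attention_mask_from_input_mask(to_mask, to_mask)
#   # mask -> [[[1., 1., 0.], [1., 1., 0.], [1., 1., 0.]]], shape [1, 3, 3]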
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
  reshapes rather than with actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if batch_size is None or from_seq_length is None or to_seq_length is None:
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer, attention_probs
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
attn_maps = []
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head, probs = attention_layer(
from_tensor=prev_output,
to_tensor=prev_output,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attn_maps.append(probs)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + prev_output)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
prev_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
prev_output = dropout(prev_output, hidden_dropout_prob)
prev_output = layer_norm(prev_output + attention_output)
all_layer_outputs.append(prev_output)
attn_maps = tf.stack(attn_maps, 0)
if do_return_all_layers:
return tf.stack([reshape_from_matrix(layer, input_shape)
for layer in all_layer_outputs], 0), attn_maps
else:
return reshape_from_matrix(prev_output, input_shape), attn_maps
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
shape = np.array(tensor).shape
if isinstance(expected_rank, six.integer_types):
assert len(shape) == expected_rank
elif expected_rank is not None:
assert len(shape) in expected_rank
return shape
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
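# Illustrative sketch (not part of the original file): with a placeholder
# whose batch dimension is unknown, the static length survives and only the
# dynamic dimension comes back as a scalar Tensor.
#   x = tf.placeholder(tf.int32, shape=[None, 128])
#   shape = get_shape_list(x, expected_rank=2)
#   # shape[0] is a tf.Tensor, shape[1] == 128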
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
google-research/electra
|
model/modeling.py
|
Python
|
apache-2.0
| 39,858
|
[
"Gaussian"
] |
8fef544eb179d8c9d89095760f76038f8d488d16edf0cc004d406d8848c2bf73
|
"""
potential.py
Experimental Data & Simulation Synthesis.
This file contains classes necessary for the integration of simulation and
experimental data. Specifically, it provides a Potential class used (by
md.py or mc.py) to run simulations.
"""
import abc
import numpy as np
from mdtraj import Trajectory
from mdtraj import utils as mdutils
from odin import exptdata
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
#logger.setLevel('DEBUG')
class Potential(object):
"""
Attributes for a kind of experimental potential. Any potential that
    inherits from this class can be sampled (integrated) by the samplers in
odin/sample.py.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, trajectory):
"""
Takes a set of xyz coordinates and evaluates the potential on that
conformation.
Parameters
----------
trajectory : mdtraj.Trajectory
A trajectory to evaluate the potential at
"""
        raise NotImplementedError('subclasses must implement __call__')
def _check_is_traj(self, trajectory):
"""
        ensure a `trajectory` object faithfully represents an atomic configuration
"""
# typecheck
        if not isinstance(trajectory, Trajectory):
raise TypeError('`trajectory` must be type mdtraj.Trajectory, got: %s' % type(trajectory))
return
class FlatPotential(Potential):
"""
    This is a minimal implementation of a Potential object, used mostly for testing.
It can also be used to integrate a model in a prior potential only, without
any experimental information.
"""
def __call__(self, traj):
"""
Takes a set of xyz coordinates and evaluates the potential on that
conformation.
Parameters
----------
trajectory : mdtraj.Trajectory
A trajectory to evaluate the potential at
"""
self._check_is_traj(traj)
return np.ones(traj.n_frames)
class ExptPotential(Potential):
"""
    An incomplete implementation of a 'Potential' that adds a number of useful
    methods for managing a collection of experimental datasets.
"""
def add_experiment(self, expt):
"""
Add an experiment to the potential object.
Parameters
----------
expt : odin.exptdata.ExptDataBase
An experiment.
"""
if not isinstance(expt, exptdata.ExptDataBase):
            raise TypeError('each of `experiments` must inherit from odin.exptdata.ExptDataBase')
self._experiments.append(expt)
self._num_measurements += expt.num_data
return
@property
def num_measurements(self):
return self._num_measurements
@property
def num_experiments(self):
return len(self._experiments)
def predictions(self, trajectory):
"""
        Predict the array of experimental values for each snapshot in `trajectory`.
Parameters
----------
trajectory : mdtraj.trajectory
A trajectory to predict the experimental values for.
Returns
-------
prediction : ndarray, 2-D
The predicted values. Will be two dimensional,
len(trajectory) X len(values).
"""
        # start with an (n_frames x 0) array so axis-1 concatenation is valid
        predictions = np.zeros((trajectory.n_frames, 0))
for expt in self._experiments:
predictions = np.concatenate([ predictions, expt.predict(trajectory) ], axis=1)
assert predictions.shape[0] == trajectory.n_frames
assert len(predictions.shape) == 2
return predictions
class WeightedExptPotential(ExptPotential):
"""
This class implements a potential of the form:
V(x) = sum_i { lambda_i * f_i(x) }
-- lambda_i is a scalar weight
-- f_i is an experimental prediction for conformation 'x'
"""
def __init__(self, *experiments):
"""
Initialize WeightedExptPotential.
Parameters
----------
*args : odin.exptdata.ExptDataBase
Pass any number of experimental data sets, all of which are combined
into the weighted potenial.
"""
self._experiments = []
self._num_measurements = 0
self._weights = np.array([])
for expt in experiments:
self.add_experiment(expt)
return
def __call__(self, trajectory):
"""
Takes a set of xyz coordinates and evaluates the potential on that
conformation.
Parameters
----------
trajectory : mdtraj.Trajectory
A trajectory to evaluate the potential at
"""
self._check_is_traj(trajectory)
energy = np.sum( self.weights[None,:] * self.predictions(trajectory), axis=1 )
return energy
def add_experiment(self, expt):
"""
Add an experiment to the potential object.
Parameters
----------
expt : odin.exptdata.ExptDataBase
An experiment.
"""
super(WeightedExptPotential, self).add_experiment(expt)
self._weights = np.concatenate([ self._weights, np.ones(expt.num_data) ])
assert len(self._weights) == self._num_measurements
return
@property
def weights(self):
return self._weights
def set_all_weights(self, weights):
"""
Set the weights for all the experiments.
Parameters
----------
weights : np.ndarray
An array of the weights to use. Must be self.num_measurements long.
"""
        if not isinstance(weights, np.ndarray):
            raise TypeError('`weights` must be a np.ndarray, '
                            'got: %s' % type(weights))
if not len(weights) == self.num_measurements:
raise ValueError('`weights` must be len self.num_measurements. Got'
' len: %d, require: %d' % (len(weights),
self.num_measurements))
self._weights = weights
return
def expt_weights(self, expt_index):
"""
Get the weights corresponding to a single experiment.
Parameters
----------
expt_index : int
The index corresponding to the experiment to get weights for.
"""
start = 0
for i,expt in enumerate(self._experiments):
if i == expt_index:
end = start + expt.num_data
break
else:
start += expt.num_data
logger.debug('start/end: %d/%d' % (start, end))
return self._weights[start:end]
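# Illustrative sketch (not part of the original module): `SomeExpt` is a
# hypothetical exptdata.ExptDataBase subclass and `traj` an mdtraj.Trajectory;
# the weighted potential sums per-measurement predictions scaled by weights.
#   pot = WeightedExptPotential(SomeExpt(...))
#   pot.set_all_weights(0.5 * np.ones(pot.num_measurements))
#   energies = pot(traj)  # one energy per frame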
|
tjlane/odin
|
src/python/potential.py
|
Python
|
gpl-2.0
| 6,842
|
[
"MDTraj"
] |
c7c6b97fe4e8ea5b3d62b9436f9c3387404c4d45f05ff784322d05acdae60710
|
from simulate import *
import matplotlib.pyplot as plt
class possum(simulate):
"""
Class for creating polarization and
faraday rotation spectra.
Frequency Coverages:
_createWSRT()
Frequency range for the Westerbork
Synthesis Radio Telescope
310 - 380 MHz
_createASKAP12()
ASKAP12 frequency coverage
700 - 1300 MHz
1500 - 1800 MHz
_createASKAP36()
ASKAP36 frequency coverage
1130 - 1430 MHz
"""
def __init__(self):
self.__c = 2.99e+08 # speed of light in m/s
self.__mhz = 1.0e+06
def _createWSRT(self, *args):
"""
Create the WSRT frequency spectrum:
310 - 380 MHz
"""
self.nu_ = self._createFrequency(310., 380., nchan=400)
def _createASKAP12(self, *args):
"""
Create the ASKAP12 frequency range:
700 - 1300 MHz
1500 - 1800 MHz
To call:
_createASKAP12()
Parameters:
[None]
        Postcondition:
            self.nu_ contains the ASKAP12 frequency coverage [Hz]
"""
band12 = self._createFrequency(700.,1300.,nchan=600)
band3 = self._createFrequency(1500.,1800.,nchan=300)
self.nu_ = np.concatenate((band12, band3))
def _createASKAP36(self, *args):
"""
Create the ASKAP36 frequency range:
1130 - 1430 MHZ
To call:
_createASKAP36()
Parameters:
[None]
        Postcondition:
            self.nu_ contains the ASKAP36 frequency coverage [Hz]
"""
self.nu_ = self._createFrequency(1130., 1430., nchan=300)
    def _createFrequency(self, numin=700., numax=1800., nchan=100, store=False):
"""
Creates an array of evenly spaced frequencies
numin and numax are in [MHz]
To call:
_createFrequency(numin, numax, nchan)
Parameters:
            numin
            numax
            nchan
            store
        Postcondition:
            Returns the frequency array [Hz], or stores it in
            self.nu_ when store=True
"""
# ======================================
# Convert MHz to Hz
# ======================================
numax = numax * self.__mhz
numin = numin * self.__mhz
# ======================================
# Generate an evenly spaced grid
# of frequencies and return
# ======================================
if store:
self.nu_ = np.arange(nchan)*(numax-numin)/(nchan-1) + numin
else:
return(np.arange(nchan)*(numax-numin)/(nchan-1) + numin)
def _createNspec(self, flux, depth, chi, sig=0):
"""
Function for generating N faraday spectra
and merging into one polarization spectrum.
To call:
createNspec(flux, depth, chi, sig)
Parameters:
flux [float, array]
depth [float, array]
chi [float, array]
sig [float, const]
"""
# ======================================
# Convert inputs to matrices
# ======================================
nu = np.asmatrix(self.nu_)
flux = np.asmatrix(flux).T
chi = np.asmatrix(chi).T
depth = np.asmatrix(depth).T
# ======================================
# Compute the polarization
# ======================================
P = flux.T * np.exp(2j * (chi + depth * np.square(self.__c / nu)))
P = np.ravel(P)
# ======================================
# Add Gaussian noise
# ======================================
if sig != 0:
P += self._addNoise(sig, P.size)
# ======================================
# Store the polarization
# ======================================
self.polarization_ = P
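    # ======================================
    # Illustrative note (not part of the
    # original class): _createNspec implements
    # the multi-component polarization model
    #     P(nu) = sum_j f_j*exp(2j*(chi_j + phi_j*(c/nu)**2))
    # where f_j, chi_j, and phi_j are the flux,
    # intrinsic angle, and Faraday depth of
    # component j.
    # ======================================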
def _createFaradaySpectrum(self, philo=-250, phihi=250):
"""
Function for creating the Faraday spectrum
"""
F = []
phi = []
        chiSq = np.mean( (self.__c / self.nu_)**2)   # mean of lambda^2 across the band
for far in range(philo, phihi+1):
phi.append(far)
temp = np.exp(-2j * far * ((self.__c / self.nu_)**2 - chiSq))
temp = np.sum( self.polarization_ * temp)
F.append(temp)
faraday = np.asarray(F) / len(self.nu_)
self.phi_ = np.asarray(phi)
self.faraday_ = faraday / np.abs(faraday).max()
def _addNoise(self, sigma, N):
"""
Function for adding real and
imaginary noise
To call:
_addNoise(sigma, N)
Parameters:
sigma
N
"""
noiseReal = np.random.normal(scale=sigma, size=N)
noiseImag = 1j * np.random.normal(scale=sigma, size=N)
return(noiseReal + noiseImag)
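# ======================================================
# Illustrative sketch (not part of the original class):
# build a noisy one-component WSRT spectrum and its
# Faraday spectrum.
#     spec = possum()
#     spec._createWSRT()
#     spec._createNspec([1.0], [50.], [0.], sig=0.01)
#     spec._createFaradaySpectrum()
# ======================================================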
# ======================================================
# Try to recreate figure 21 in Farnsworth et al. (2011)
#
# Haven't been able to get the large offset;
# peak appears between the two RM components
# ======================================================
if __name__ == '__main__':
spec = possum()
spec._simulateNspec(5)
plt.plot(spec.X_[1,:,0], 'r-', label='real')
plt.plot(spec.X_[1,:,1], 'b-', label='imag')
plt.plot(np.abs(spec.X_[1,:,0] + 1j*spec.X_[1,:,1]), 'k--', label='abs')
plt.legend(loc='best')
plt.show()
|
sheabrown/faraday_complexity
|
final/possum.py
|
Python
|
mit
| 4,428
|
[
"Gaussian"
] |
346f0cef857aa26869ecc1dd12410ddb1f34abcda4913bf5e061b178ba2f2fee
|
"""This script includes things common to the other included Python scripts
"""
import math
import os
import sys
#Constants as defined in ForceManII
ang2au=1.889725989
j2cal=4.184
kcalmol2au=1/627.5096
deg2rad=math.pi/180
k2au=kcalmol2au/(ang2au**2)
anglek2au=kcalmol2au
#Recognized param types
params={"k":"FManII::Param_t::K",
"r0":"FManII::Param_t::r0",
"v":"FManII::Param_t::amp",
"phi":"FManII::Param_t::phi",
"n":"FManII::Param_t::n",
"q":"FManII::Param_t::q",
"sigma":"FManII::Param_t::sigma",
"epsilon":"FManII::Param_t::epsilon"
}
#Recognized internal coordinate types
intcoords={"bond":"FManII::IntCoord_t::BOND",
"pair13":"FManII::IntCoord_t::PAIR13",
"pair14":"FManII::IntCoord_t::PAIR14",
"pair":"FManII::IntCoord_t::PAIR",
"angle":"FManII::IntCoord_t::ANGLE",
"torsion":"FManII::IntCoord_t::TORSION",
"imp":"FManII::IntCoord_t::IMPTORSION",
}
#Recognized model types
models={"ho":"FManII::Model_t::HARMONICOSCILLATOR",
"fs":"FManII::Model_t::FOURIERSERIES",
"cl":"FManII::Model_t::ELECTROSTATICS",
"lj":"FManII::Model_t::LENNARD_JONES",
}
#Recognized ways of combining parameters
comb_rules={"ARITHMETIC":"FManII::mean",
"GEOMETRIC":"FManII::geometric",
"PRODUCT":"FManII::product"}
#Recognized atom type mappings
typetypes={"type":"FManII::TypeTypes_t::TYPE",
"class":"FManII::TypeTypes_t::CLASS"
}
#Recognized force field terms
ffterms={"hb":(models["ho"],intcoords["bond"]),
"ha":(models["ho"],intcoords["angle"]),
"hi":(models["ho"],intcoords["imp"]),
"ft":(models["fs"],intcoords["torsion"]),
"fi":(models["fs"],intcoords["imp"]),
"cl14":(models["cl"],intcoords["pair14"]),
"cl":(models["cl"],intcoords["pair"]),
"lj14":(models["lj"],intcoords["pair14"]),
"lj":(models["lj"],intcoords["pair"]),
"ub":(models["ho"],intcoords["pair13"])}
def check(cond,msg):
if not cond: raise RuntimeError(msg)
def cross(v1,v2):
"""Defines cross product so we don't depend on numpy"""
return [v1[1]*v2[2]-v1[2]*v2[1],
v1[2]*v2[0]-v1[0]*v2[2],
v1[0]*v2[1]-v1[1]*v2[0]]
def dot(v1,v2):
"""Defines dot product so we don't depend on numpy"""
return v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]
def diff(v1,v2):
"""Defines the difference between two vectors"""
return [v1[k]-v2[k] for k in range(0,3)]
def mag(v1):
"""Magnitude of a vector"""
return math.sqrt(sum(v1[k]**2 for k in range(0,3)))
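# Illustrative sanity checks (not part of the original script): the helpers
# reproduce the usual right-handed identities, e.g.
#   cross([1,0,0],[0,1,0]) == [0,0,1]
#   dot([1,2,3],[4,5,6]) == 32
#   mag(diff([1,1,1],[0,0,0])) == math.sqrt(3)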
def read_sys(xyz_file):
"""Given the path to a Tinker-style .xyz file reads the coordinates,
connectivity, and parameter types into lists. These lists are then
returned"""
carts=[];connect=[];param_num=[]
with open(xyz_file,"r") as f:
next(f)
for line in f:
tokenized=line.split()
carts.append([float(i)*ang2au for i in tokenized[2:5]])
param_num.append(int(tokenized[5]))
connect.append([int(i)-1 for i in tokenized[6:]])#Tinker starts at 1
return carts,connect,param_num
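# Minimal usage sketch (the file name is hypothetical):
#
#   carts, connect, param_num = read_sys("water.xyz")
#   # carts[i]     -> coordinates of atom i in Bohr
#   # param_num[i] -> the Tinker atom type of atom i
#   # connect[i]   -> 0-based indices of the atoms bonded to atom i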
|
ryanmrichard/ForceManII
|
bin/FManII.py
|
Python
|
lgpl-3.0
| 3,164
|
[
"TINKER"
] |
7f8ca7ecd51791c6925b585b314c45fdffba4a79cffdb2d4ba30d1dc13e90df4
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys, time, urllib2, shutil, os, errno
# ########################################################################### #
# ############################## NOTICE ############################# #
# ########################################################################### #
# #
# This file is based on excalibur, and is used to provide a standalone set of #
# tick dumps (without a bot). #
# #
# This script is *NOT* part of the normal operation of merlin. #
# #
# ########################################################################### #
# ############################## CONFIG ############################# #
# ########################################################################### #
base_url = "http://game.planetarion.com/botfiles/"
alt_base = "http://dumps.dfwtk.com/"
useragent = "Dumper (Python-urllib/%s); Admin/YOUR_IRC_NICK_HERE" % (urllib2.__version__)
# ########################################################################### #
# ########################################################################### #
# From http://www.diveintopython.net/http_web_services/etags.html
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
class botfile:
def __init__(self, page):
self.header = {}
self.body = []
# Parse header
line = page.readline().strip()
while line:
[field, value] = line.split(": ",1)
if value[0] == "'" and value[-1] == "'":
value = value[1:-1]
self.header[field] = value
line = page.readline().strip()
if self.header.has_key("Tick"):
if self.header["Tick"].isdigit():
self.tick = int(self.header["Tick"])
else:
raise TypeError("Non-numeric tick \"%s\" found." % self.header["Tick"])
else:
raise TypeError("No tick information found.")
if not self.header.has_key("Separator"):
self.header["Separator"] = "\t"
if not self.header.has_key("EOF"):
self.header["EOF"] = None
line = page.readline().strip()
while line != self.header["EOF"]:
self.body.append(line)
line = page.readline().strip()
def __iter__(self):
return iter(self.body)
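# For reference, the parser above expects a dump file shaped roughly like
# this (field names other than Tick/Separator/EOF vary by dump; the values
# are illustrative). A blank line ends the header, and the body runs until
# the EOF marker:
#
#   Tick: 123
#   Separator: '\t'
#   EOF: 'EndOfFile'
#   <blank line>
#   <separator-delimited data rows>
#   EndOfFile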
def get_dumps(last_tick, etag, modified, alt=False):
global base_url, alt_base, useragent
if alt:
purl = alt_base + str(last_tick+1) + "/planet_listing.txt"
gurl = alt_base + str(last_tick+1) + "/galaxy_listing.txt"
aurl = alt_base + str(last_tick+1) + "/alliance_listing.txt"
furl = alt_base + str(last_tick+1) + "/user_feed.txt"
else:
purl = base_url + "planet_listing.txt"
gurl = base_url + "galaxy_listing.txt"
aurl = base_url + "alliance_listing.txt"
furl = base_url + "user_feed.txt"
# Build the request for planet data
req = urllib2.Request(purl)
if etag:
req.add_header('If-None-Match', etag)
if modified:
req.add_header('If-Modified-Since', modified)
if useragent:
req.add_header('User-Agent', useragent)
opener = urllib2.build_opener(DefaultErrorHandler())
pdump = opener.open(req)
    try:
        if pdump.status == 304:
            print "Dump files not modified. Waiting..."
            time.sleep(60)
            return (False, False, False, False)
        elif pdump.status == 404 and last_tick < alt:
            # Dumps are missing from archive. Check for dumps for next tick
            print "Dump files missing. Looking for newer..."
            return get_dumps(last_tick+1, etag, modified, alt)
        else:
            print "Error: %s" % pdump.status
            time.sleep(120)
            return (False, False, False, False)
    except AttributeError:
        # A successful response carries no .status attribute, so carry on
        pass
# Open the dump files
try:
req = urllib2.Request(gurl)
req.add_header('User-Agent', useragent)
gdump = opener.open(req)
if gdump.info().status:
print "Error loading galaxy listing. Trying again in 2 minutes..."
time.sleep(120)
return (False, False, False, False)
req = urllib2.Request(aurl)
req.add_header('User-Agent', useragent)
adump = opener.open(req)
if adump.info().status:
print "Error loading alliance listing. Trying again in 2 minutes..."
time.sleep(120)
return (False, False, False, False)
req = urllib2.Request(furl)
req.add_header('User-Agent', useragent)
udump = opener.open(req)
if udump.info().status:
if alt:
print "Error loading user feed. Ignoring."
udump = None
else:
print "Error loading user feed. Trying again in 2 minutes..."
time.sleep(120)
return (False, False, False, False)
except Exception, e:
print "Failed gathering dump files.\n%s" % (str(e),)
time.sleep(300)
return (False, False, False, False)
else:
return (pdump, gdump, adump, udump)
def checktick(planets, galaxies, alliances, userfeed):
if not planets.tick:
print "Bad planet dump"
time.sleep(120)
return False
print "Planet dump for tick %s" % (planets.tick)
if not galaxies.tick:
print "Bad galaxy dump"
time.sleep(120)
return False
print "Galaxy dump for tick %s" % (galaxies.tick)
if not alliances.tick:
print "Bad alliance dump"
time.sleep(120)
return False
print "Alliance dump for tick %s" % (alliances.tick)
    # As above, but only if a user feed was actually retrieved
if userfeed:
if not userfeed.tick:
print "Bad userfeed dump"
time.sleep(120)
return False
print "UserFeed dump for tick %s" % (userfeed.tick)
    # Check the ticks of the dumps are all the same; whether this is
    # actually a new tick (greater than the previous one) is checked
    # by the caller in ticker()
if not ((planets.tick == galaxies.tick == alliances.tick) and ((not userfeed) or planets.tick == userfeed.tick)):
print "Varying ticks found, sleeping\nPlanet: %s, Galaxy: %s, Alliance: %s, UserFeed: %s" % (planets.tick, galaxies.tick, alliances.tick, userfeed.tick if userfeed else "N/A")
time.sleep(30)
return False
return True
def load_config():
if os.path.isfile("dump_info"):
info = open("dump_info", "r+")
last_tick = int(info.readline()[:-1] or 0)
etag = info.readline()[:-1]
if etag == "None":
etag = None
modified = info.readline()[:-1]
if modified == "None":
modified = None
info.seek(0)
else:
info = open("dump_info", "w")
last_tick = 0
etag = None
modified = None
return (info, last_tick, etag, modified)
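# The dump_info state file read above (and rewritten inside ticker) therefore
# holds three lines: the last tick, the ETag, and the Last-Modified stamp,
# e.g. (illustrative values):
#
#   1234
#   "686897696a7c876b7e"
#   Sat, 01 Jan 2011 00:00:00 GMT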
def ticker(alt=False):
t_start=time.time()
t1=t_start
(info, last_tick, etag, modified) = load_config()
while True:
try:
# How long has passed since starting?
# If 55 mins, we're not likely getting dumps this tick, so quit
if (time.time() - t_start) >= (55 * 60):
print "55 minutes without a successful dump, giving up!"
info.close()
sys.exit()
(pdump, gdump, adump, udump) = get_dumps(last_tick, etag, modified, alt)
if not pdump:
continue
# Get header information now, as the headers will be lost if we save dumps
etag = pdump.headers.get("ETag")
modified = pdump.headers.get("Last-Modified")
try:
os.makedirs("dumps/%s" % (last_tick+1,))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Open dump files
pf = open("dumps/%s/planet_listing.txt" % (last_tick+1,), "w+")
gf = open("dumps/%s/galaxy_listing.txt" % (last_tick+1,), "w+")
af = open("dumps/%s/alliance_listing.txt" % (last_tick+1,), "w+")
uf = open("dumps/%s/user_feed.txt" % (last_tick+1,), "w+")
            # Copy dump contents (the user feed may be absent on the archive)
            shutil.copyfileobj(pdump, pf)
            shutil.copyfileobj(gdump, gf)
            shutil.copyfileobj(adump, af)
            if udump:
                shutil.copyfileobj(udump, uf)
            # Return to the start of the files
            pf.seek(0)
            gf.seek(0)
            af.seek(0)
            uf.seek(0)
            # Swap pointers, keeping udump as None if there was no user feed
            pdump = pf
            gdump = gf
            adump = af
            udump = uf if udump else None
# Parse botfile headers
try:
planets = botfile(pdump)
galaxies = botfile(gdump)
alliances = botfile(adump)
userfeed = botfile(udump) if udump else None
except TypeError as e:
print "Error: %s" % e
time.sleep(60)
continue
if not checktick(planets, galaxies, alliances, userfeed):
continue
if not planets.tick > last_tick:
if planets.tick < last_tick - 5:
print "Looks like a new round. Giving up."
return False
print "Stale ticks found, sleeping"
time.sleep(60)
continue
t2=time.time()-t1
print "Loaded dumps from webserver in %.3f seconds" % (t2,)
t1=time.time()
if planets.tick > last_tick + 1:
if not alt:
print "Missing ticks. Switching to alternative url...."
ticker(planets.tick-1)
(info, last_tick, etag, modified) = load_config()
continue
if planets.tick > alt:
print "Something is very, very wrong..."
continue
info.write(str(planets.tick)+"\n"+str(etag)+"\n"+str(modified)+"\n")
info.flush()
info.seek(0)
if planets.tick < alt:
print "Seen:%s Target:%s" %(planets.tick,alt)
print "Still some ticks missing... (waiting 10 seconds)"
time.sleep(10)
ticker(alt)
break
except Exception, e:
print "Something random went wrong, sleeping for 15 seconds to hope it improves: %s" % (str(e),)
time.sleep(15)
continue
info.close()
t1=time.time()-t_start
print "Total time taken: %.3f seconds" % (t1,)
return planets.tick
print "Dumping from %s" % (base_url,)
ticker()
|
d7415/merlin
|
dumper.py
|
Python
|
gpl-2.0
| 12,137
|
[
"Galaxy"
] |
7873c66c8acd851dacabbed16b9dd5bb4f912b10b183287690398b504f6db813
|
"""
Tests for generalized linear models. Majority of code either directly borrowed
or closely adapted from statsmodels package. Model results verfiied using glm
function in R and GLM function in statsmodels.
"""
__author__ = 'Taylor Oshan tayoshan@gmail.com'
from pysal.contrib.glm.glm import GLM
from pysal.contrib.glm.family import Gaussian, Poisson, Binomial, QuasiPoisson
import numpy as np
import pysal
import unittest
import math
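# All of the tests below follow the same pattern: build a response vector y
# and design matrix X, pick an exponential-family object, fit by iteratively
# reweighted least squares, and compare the results against reference values
# from R's glm and statsmodels' GLM. A minimal sketch:
#
#   model = GLM(y, X, family=Gaussian())
#   results = model.fit()
#   results.params, results.bse, results.aic  # etc.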
class TestGaussian(unittest.TestCase):
"""
Tests for Poisson GLM
"""
def setUp(self):
db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
def testIWLS(self):
model = GLM(self.y, self.X, family=Gaussian())
results = model.fit()
self.assertAlmostEqual(results.n, 49)
self.assertAlmostEqual(results.df_model, 2)
self.assertAlmostEqual(results.df_resid, 46)
self.assertAlmostEqual(results.aic, 408.73548964604873)
self.assertAlmostEqual(results.bic, 10467.991340493107)
self.assertAlmostEqual(results.deviance, 10647.015074206196)
self.assertAlmostEqual(results.llf, -201.36774482302437)
self.assertAlmostEqual(results.null_deviance, 16367.794631703124)
self.assertAlmostEqual(results.scale, 231.45684943926514)
np.testing.assert_allclose(results.params, [ 46.42818268, 0.62898397,
-0.48488854])
np.testing.assert_allclose(results.bse, [ 13.19175703, 0.53591045,
0.18267291])
np.testing.assert_allclose(results.cov_params(),
[[ 1.74022453e+02, -6.52060364e+00, -2.15109867e+00],
[ -6.52060364e+00, 2.87200008e-01, 6.80956787e-02],
[ -2.15109867e+00, 6.80956787e-02, 3.33693910e-02]])
np.testing.assert_allclose(results.tvalues, [ 3.51948437, 1.17367365,
-2.65440864])
np.testing.assert_allclose(results.pvalues, [ 0.00043239, 0.24052577,
0.00794475], atol=1.0e-8)
np.testing.assert_allclose(results.conf_int(),
[[ 20.57281401, 72.28355135],
[ -0.42138121, 1.67934915],
[ -0.84292086, -0.12685622]])
np.testing.assert_allclose(results.normalized_cov_params,
[[ 7.51857004e-01, -2.81720055e-02, -9.29373521e-03],
[ -2.81720055e-02, 1.24083607e-03, 2.94204638e-04],
[ -9.29373521e-03, 2.94204638e-04, 1.44171110e-04]])
np.testing.assert_allclose(results.mu,
[ 51.08752105, 50.66601521, 41.61367567, 33.53969014,
28.90638232, 43.87074227, 51.64910882, 34.92671563,
42.69267622, 38.49449134, 20.92815471, 25.25228436,
29.78223486, 25.02403635, 29.07959539, 24.63352275,
34.71372149, 33.40443052, 27.29864225, 65.86219802,
33.69854751, 37.44976435, 50.01304928, 36.81219959,
22.02674837, 31.64775955, 27.63563294, 23.7697291 ,
22.43119725, 21.76987089, 48.51169321, 49.05891819,
32.31656426, 44.20550354, 35.49244888, 51.27811308,
36.55047181, 27.37048914, 48.78812922, 57.31744163,
51.22914162, 54.70515578, 37.06622277, 44.5075759 ,
41.24328983, 49.93821824, 44.85644299, 40.93838609, 47.32045464])
self.assertAlmostEqual(results.pearson_chi2, 10647.015074206196)
np.testing.assert_allclose(results.resid_response,
[ 29.37948195, -6.09901421, -15.26367567, -0.33968914,
-5.68138232, -15.12074227, 23.35089118, 2.19828437,
9.90732178, 57.90551066, -1.22815371, -5.35228436,
11.91776614, 17.87596565, -11.07959539, -5.83352375,
7.03627851, 26.59556948, 3.30135775, 15.40479998,
-13.72354751, -6.99976335, -2.28004728, 16.38780141,
-4.12674837, -11.34776055, 6.46436506, -0.9197291 ,
10.06880275, 0.73012911, -16.71169421, -8.75891919,
-8.71656426, -15.75550254, -8.49244888, -14.97811408,
6.74952719, -4.67048814, -9.18813122, 4.63255937,
-9.12914362, -10.37215578, -11.36622177, -11.0075759 ,
-13.51028983, 26.16177976, -2.35644299, -14.13838709, -11.52045564])
np.testing.assert_allclose(results.resid_working,
[ 29.37948195, -6.09901421, -15.26367567, -0.33968914,
-5.68138232, -15.12074227, 23.35089118, 2.19828437,
9.90732178, 57.90551066, -1.22815371, -5.35228436,
11.91776614, 17.87596565, -11.07959539, -5.83352375,
7.03627851, 26.59556948, 3.30135775, 15.40479998,
-13.72354751, -6.99976335, -2.28004728, 16.38780141,
-4.12674837, -11.34776055, 6.46436506, -0.9197291 ,
10.06880275, 0.73012911, -16.71169421, -8.75891919,
-8.71656426, -15.75550254, -8.49244888, -14.97811408,
6.74952719, -4.67048814, -9.18813122, 4.63255937,
-9.12914362, -10.37215578, -11.36622177, -11.0075759 ,
-13.51028983, 26.16177976, -2.35644299, -14.13838709, -11.52045564])
np.testing.assert_allclose(results.resid_pearson,
[ 29.37948195, -6.09901421, -15.26367567, -0.33968914,
-5.68138232, -15.12074227, 23.35089118, 2.19828437,
9.90732178, 57.90551066, -1.22815371, -5.35228436,
11.91776614, 17.87596565, -11.07959539, -5.83352375,
7.03627851, 26.59556948, 3.30135775, 15.40479998,
-13.72354751, -6.99976335, -2.28004728, 16.38780141,
-4.12674837, -11.34776055, 6.46436506, -0.9197291 ,
10.06880275, 0.73012911, -16.71169421, -8.75891919,
-8.71656426, -15.75550254, -8.49244888, -14.97811408,
6.74952719, -4.67048814, -9.18813122, 4.63255937,
-9.12914362, -10.37215578, -11.36622177, -11.0075759 ,
-13.51028983, 26.16177976, -2.35644299, -14.13838709, -11.52045564])
np.testing.assert_allclose(results.resid_anscombe,
[ 29.37948195, -6.09901421, -15.26367567, -0.33968914,
-5.68138232, -15.12074227, 23.35089118, 2.19828437,
9.90732178, 57.90551066, -1.22815371, -5.35228436,
11.91776614, 17.87596565, -11.07959539, -5.83352375,
7.03627851, 26.59556948, 3.30135775, 15.40479998,
-13.72354751, -6.99976335, -2.28004728, 16.38780141,
-4.12674837, -11.34776055, 6.46436506, -0.9197291 ,
10.06880275, 0.73012911, -16.71169421, -8.75891919,
-8.71656426, -15.75550254, -8.49244888, -14.97811408,
6.74952719, -4.67048814, -9.18813122, 4.63255937,
-9.12914362, -10.37215578, -11.36622177, -11.0075759 ,
-13.51028983, 26.16177976, -2.35644299, -14.13838709, -11.52045564])
np.testing.assert_allclose(results.resid_deviance,
[ 29.37948195, -6.09901421, -15.26367567, -0.33968914,
-5.68138232, -15.12074227, 23.35089118, 2.19828437,
9.90732178, 57.90551066, -1.22815371, -5.35228436,
11.91776614, 17.87596565, -11.07959539, -5.83352375,
7.03627851, 26.59556948, 3.30135775, 15.40479998,
-13.72354751, -6.99976335, -2.28004728, 16.38780141,
-4.12674837, -11.34776055, 6.46436506, -0.9197291 ,
10.06880275, 0.73012911, -16.71169421, -8.75891919,
-8.71656426, -15.75550254, -8.49244888, -14.97811408,
6.74952719, -4.67048814, -9.18813122, 4.63255937,
-9.12914362, -10.37215578, -11.36622177, -11.0075759 ,
-13.51028983, 26.16177976, -2.35644299, -14.13838709, -11.52045564])
np.testing.assert_allclose(results.null,
[ 38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447,
38.43622447, 38.43622447, 38.43622447, 38.43622447, 38.43622447])
self.assertAlmostEqual(results.D2, .349514377851)
self.assertAlmostEqual(results.adj_D2, 0.32123239427957673)
class TestPoisson(unittest.TestCase):
    """
    Tests for Poisson GLM
    """
def setUp(self):
db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
y = np.array(db.by_col("HOVAL"))
y = np.reshape(y, (49,1))
self.y = np.round(y).astype(int)
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
def testIWLS(self):
model = GLM(self.y, self.X, family=Poisson())
results = model.fit()
self.assertAlmostEqual(results.n, 49)
self.assertAlmostEqual(results.df_model, 2)
self.assertAlmostEqual(results.df_resid, 46)
self.assertAlmostEqual(results.aic, 500.85184179938756)
self.assertAlmostEqual(results.bic, 51.436404535087661)
self.assertAlmostEqual(results.deviance, 230.46013824817649)
self.assertAlmostEqual(results.llf, -247.42592089969378)
self.assertAlmostEqual(results.null_deviance, 376.97293610347361)
self.assertAlmostEqual(results.scale, 1.0)
np.testing.assert_allclose(results.params, [ 3.92159085, 0.01183491,
-0.01371397], atol=1.0e-8)
np.testing.assert_allclose(results.bse, [ 0.13049161, 0.00511599,
0.00193769], atol=1.0e-8)
np.testing.assert_allclose(results.cov_params(),
[[ 1.70280610e-02, -6.18628383e-04, -2.21386966e-04],
[ -6.18628383e-04, 2.61733917e-05, 6.77496445e-06],
[ -2.21386966e-04, 6.77496445e-06, 3.75463502e-06]])
np.testing.assert_allclose(results.tvalues, [ 30.0524361 , 2.31331634,
-7.07748998])
np.testing.assert_allclose(results.pvalues, [ 2.02901657e-198,
2.07052532e-002, 1.46788805e-012])
np.testing.assert_allclose(results.conf_int(),
[[ 3.66583199e+00, 4.17734972e+00],
[ 1.80774841e-03, 2.18620753e-02],
[ -1.75117666e-02, -9.91616901e-03]])
np.testing.assert_allclose(results.normalized_cov_params,
[[ 1.70280610e-02, -6.18628383e-04, -2.21386966e-04],
[ -6.18628383e-04, 2.61733917e-05, 6.77496445e-06],
[ -2.21386966e-04, 6.77496445e-06, 3.75463502e-06]])
np.testing.assert_allclose(results.mu,
[ 51.26831574, 50.15022766, 40.06142973, 34.13799739,
28.76119226, 42.6836241 , 55.64593703, 34.08277997,
40.90389582, 37.19727958, 23.47459217, 26.12384057,
29.78303507, 25.96888223, 29.14073823, 26.04369592,
34.18996367, 32.28924005, 27.42284396, 72.69207879,
33.05316347, 36.52276972, 49.2551479 , 35.33439632,
24.07252457, 31.67153709, 27.81699478, 25.38021219,
24.31759259, 23.13586161, 48.40724678, 48.57969818,
31.92596006, 43.3679231 , 34.32925819, 51.78908089,
34.49778584, 27.56236198, 48.34273194, 57.50829097,
50.66038226, 54.68701352, 35.77103116, 43.21886784,
40.07615759, 49.98658004, 43.13352883, 40.28520774, 46.28910294])
self.assertAlmostEqual(results.pearson_chi2, 264.62262932090221)
np.testing.assert_allclose(results.resid_response,
[ 28.73168426, -5.15022766, -14.06142973, -1.13799739,
-5.76119226, -13.6836241 , 19.35406297, 2.91722003,
12.09610418, 58.80272042, -3.47459217, -6.12384057,
12.21696493, 17.03111777, -11.14073823, -7.04369592,
7.81003633, 27.71075995, 3.57715604, 8.30792121,
-13.05316347, -6.52276972, -1.2551479 , 17.66560368,
-6.07252457, -11.67153709, 6.18300522, -2.38021219,
7.68240741, -1.13586161, -16.40724678, -8.57969818,
-7.92596006, -15.3679231 , -7.32925819, -15.78908089,
8.50221416, -4.56236198, -8.34273194, 4.49170903,
-8.66038226, -10.68701352, -9.77103116, -9.21886784,
-12.07615759, 26.01341996, -1.13352883, -13.28520774, -10.28910294])
np.testing.assert_allclose(results.resid_working,
[ 1473.02506034, -258.28508941, -563.32097891, -38.84895192,
-165.69875817, -584.06666725, 1076.97496919, 99.42696848,
494.77778514, 2187.30123163, -81.56463405, -159.97823479,
363.858295 , 442.27909165, -324.64933645, -183.44387481,
267.02485844, 894.75938 , 98.09579187, 603.9200634 ,
-431.44834594, -238.2296165 , -61.82249568, 624.20344168,
-146.18099686, -369.65551968, 171.99262399, -60.41029031,
186.81765356, -26.27913713, -794.22964417, -416.79914795,
-253.04388425, -666.47490701, -251.6079969 , -817.70198717,
293.30756327, -125.74947222, -403.31045369, 258.31051005,
-438.73827602, -584.440853 , -349.51985996, -398.42903071,
-483.96599444, 1300.32189904, -48.89309853, -535.19735391,
-476.27334527])
np.testing.assert_allclose(results.resid_pearson,
[ 4.01269878, -0.72726045, -2.221602 , -0.19477008, -1.07425881,
-2.09445239, 2.59451042, 0.49969118, 1.89131202, 9.64143836,
-0.71714142, -1.19813392, 2.23861212, 3.34207756, -2.0637814 ,
-1.3802231 , 1.33568403, 4.87662684, 0.68309584, 0.97442591,
-2.27043598, -1.07931992, -0.17884182, 2.97186889, -1.23768025,
-2.07392709, 1.1723155 , -0.47246327, 1.55789092, -0.23614708,
-2.35819937, -1.23096188, -1.40274877, -2.33362391, -1.25091503,
-2.19400568, 1.44755952, -0.8690235 , -1.19989348, 0.59230634,
-1.21675413, -1.44515442, -1.63370888, -1.40229988, -1.90759306,
3.67934693, -0.17259375, -2.09312684, -1.51230062])
np.testing.assert_allclose(results.resid_anscombe,
[ 3.70889134, -0.74031295, -2.37729865, -0.19586855, -1.11374751,
-2.22611959, 2.46352013, 0.49282126, 1.80857757, 8.06444452,
-0.73610811, -1.25061371, 2.10820431, 3.05467547, -2.22437611,
-1.45136173, 1.28939698, 4.35942058, 0.66904552, 0.95674923,
-2.45438937, -1.11429881, -0.17961012, 2.76715848, -1.29658591,
-2.22816691, 1.13269136, -0.48017382, 1.48562248, -0.23812278,
-2.51664399, -1.2703721 , -1.4683091 , -2.49907536, -1.30026484,
-2.32398309, 1.39380683, -0.89495368, -1.23735395, 0.58485202,
-1.25435224, -1.4968484 , -1.71888038, -1.45756652, -2.01906267,
3.41729922, -0.17335867, -2.22921828, -1.57470549])
np.testing.assert_allclose(results.resid_deviance,
[ 3.70529668, -0.74027329, -2.37536322, -0.19586751, -1.11349765,
-2.22466106, 2.46246446, 0.4928057 , 1.80799655, 8.02696525,
-0.73602255, -1.25021555, 2.10699958, 3.05084608, -2.22214376,
-1.45072221, 1.28913747, 4.35106213, 0.6689982 , 0.95669662,
-2.45171913, -1.11410444, -0.17960956, 2.76494217, -1.29609865,
-2.22612429, 1.13247453, -0.48015254, 1.48508549, -0.23812 ,
-2.51476072, -1.27015583, -1.46777697, -2.49699318, -1.29992892,
-2.32263069, 1.39348459, -0.89482132, -1.23715363, 0.58483655,
-1.25415329, -1.49653039, -1.7181055 , -1.45719072, -2.01791949,
3.41437156, -0.1733581 , -2.22765605, -1.57426046])
np.testing.assert_allclose(results.null,
[ 38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143, 38.42857143])
self.assertAlmostEqual(results.D2, .388656011675)
        self.assertAlmostEqual(results.adj_D2, 0.36207583826952761)  # previously .375648692774
def testQuasi(self):
model = GLM(self.y, self.X, family=QuasiPoisson())
results = model.fit()
self.assertAlmostEqual(results.n, 49)
self.assertAlmostEqual(results.df_model, 2)
self.assertAlmostEqual(results.df_resid, 46)
self.assertTrue(math.isnan(results.aic))
self.assertAlmostEqual(results.bic, 51.436404535087661)
self.assertAlmostEqual(results.deviance, 230.46013824817649)
self.assertTrue(math.isnan(results.llf))
self.assertAlmostEqual(results.null_deviance, 376.97293610347361)
self.assertAlmostEqual(results.scale, 5.7526658548022223)
np.testing.assert_allclose(results.params, [ 3.92159085, 0.01183491,
-0.01371397], atol=1.0e-8)
np.testing.assert_allclose(results.bse, [ 0.31298042, 0.01227057,
0.00464749], atol=1.0e-8)
np.testing.assert_allclose(results.cov_params(),
[[ 9.79567451e-02, -3.55876238e-03, -1.27356524e-03],
[ -3.55876238e-03, 1.50566777e-04, 3.89741067e-05],
[ -1.27356524e-03, 3.89741067e-05, 2.15991606e-05]])
np.testing.assert_allclose(results.tvalues, [ 12.52982796, 0.96449604,
-2.95083339])
np.testing.assert_allclose(results.pvalues, [ 5.12737770e-36,
3.34797291e-01, 3.16917819e-03])
np.testing.assert_allclose(results.conf_int(),
[[ 3.3081605 , 4.53502121],
[-0.01221495, 0.03588478],
[-0.02282288, -0.00460506]], atol=1.0e-8)
np.testing.assert_allclose(results.normalized_cov_params,
[[ 1.70280610e-02, -6.18628383e-04, -2.21386966e-04],
[ -6.18628383e-04, 2.61733917e-05, 6.77496445e-06],
[ -2.21386966e-04, 6.77496445e-06, 3.75463502e-06]])
np.testing.assert_allclose(results.mu,
[ 51.26831574, 50.15022766, 40.06142973, 34.13799739,
28.76119226, 42.6836241 , 55.64593703, 34.08277997,
40.90389582, 37.19727958, 23.47459217, 26.12384057,
29.78303507, 25.96888223, 29.14073823, 26.04369592,
34.18996367, 32.28924005, 27.42284396, 72.69207879,
33.05316347, 36.52276972, 49.2551479 , 35.33439632,
24.07252457, 31.67153709, 27.81699478, 25.38021219,
24.31759259, 23.13586161, 48.40724678, 48.57969818,
31.92596006, 43.3679231 , 34.32925819, 51.78908089,
34.49778584, 27.56236198, 48.34273194, 57.50829097,
50.66038226, 54.68701352, 35.77103116, 43.21886784,
40.07615759, 49.98658004, 43.13352883, 40.28520774, 46.28910294])
self.assertAlmostEqual(results.pearson_chi2, 264.62262932090221)
np.testing.assert_allclose(results.resid_response,
[ 28.73168426, -5.15022766, -14.06142973, -1.13799739,
-5.76119226, -13.6836241 , 19.35406297, 2.91722003,
12.09610418, 58.80272042, -3.47459217, -6.12384057,
12.21696493, 17.03111777, -11.14073823, -7.04369592,
7.81003633, 27.71075995, 3.57715604, 8.30792121,
-13.05316347, -6.52276972, -1.2551479 , 17.66560368,
-6.07252457, -11.67153709, 6.18300522, -2.38021219,
7.68240741, -1.13586161, -16.40724678, -8.57969818,
-7.92596006, -15.3679231 , -7.32925819, -15.78908089,
8.50221416, -4.56236198, -8.34273194, 4.49170903,
-8.66038226, -10.68701352, -9.77103116, -9.21886784,
-12.07615759, 26.01341996, -1.13352883, -13.28520774, -10.28910294])
np.testing.assert_allclose(results.resid_working,
[ 1473.02506034, -258.28508941, -563.32097891, -38.84895192,
-165.69875817, -584.06666725, 1076.97496919, 99.42696848,
494.77778514, 2187.30123163, -81.56463405, -159.97823479,
363.858295 , 442.27909165, -324.64933645, -183.44387481,
267.02485844, 894.75938 , 98.09579187, 603.9200634 ,
-431.44834594, -238.2296165 , -61.82249568, 624.20344168,
-146.18099686, -369.65551968, 171.99262399, -60.41029031,
186.81765356, -26.27913713, -794.22964417, -416.79914795,
-253.04388425, -666.47490701, -251.6079969 , -817.70198717,
293.30756327, -125.74947222, -403.31045369, 258.31051005,
-438.73827602, -584.440853 , -349.51985996, -398.42903071,
-483.96599444, 1300.32189904, -48.89309853, -535.19735391,
-476.27334527])
np.testing.assert_allclose(results.resid_pearson,
[ 4.01269878, -0.72726045, -2.221602 , -0.19477008, -1.07425881,
-2.09445239, 2.59451042, 0.49969118, 1.89131202, 9.64143836,
-0.71714142, -1.19813392, 2.23861212, 3.34207756, -2.0637814 ,
-1.3802231 , 1.33568403, 4.87662684, 0.68309584, 0.97442591,
-2.27043598, -1.07931992, -0.17884182, 2.97186889, -1.23768025,
-2.07392709, 1.1723155 , -0.47246327, 1.55789092, -0.23614708,
-2.35819937, -1.23096188, -1.40274877, -2.33362391, -1.25091503,
-2.19400568, 1.44755952, -0.8690235 , -1.19989348, 0.59230634,
-1.21675413, -1.44515442, -1.63370888, -1.40229988, -1.90759306,
3.67934693, -0.17259375, -2.09312684, -1.51230062])
np.testing.assert_allclose(results.resid_anscombe,
[ 3.70889134, -0.74031295, -2.37729865, -0.19586855, -1.11374751,
-2.22611959, 2.46352013, 0.49282126, 1.80857757, 8.06444452,
-0.73610811, -1.25061371, 2.10820431, 3.05467547, -2.22437611,
-1.45136173, 1.28939698, 4.35942058, 0.66904552, 0.95674923,
-2.45438937, -1.11429881, -0.17961012, 2.76715848, -1.29658591,
-2.22816691, 1.13269136, -0.48017382, 1.48562248, -0.23812278,
-2.51664399, -1.2703721 , -1.4683091 , -2.49907536, -1.30026484,
-2.32398309, 1.39380683, -0.89495368, -1.23735395, 0.58485202,
-1.25435224, -1.4968484 , -1.71888038, -1.45756652, -2.01906267,
3.41729922, -0.17335867, -2.22921828, -1.57470549])
np.testing.assert_allclose(results.resid_deviance,
[ 3.70529668, -0.74027329, -2.37536322, -0.19586751, -1.11349765,
-2.22466106, 2.46246446, 0.4928057 , 1.80799655, 8.02696525,
-0.73602255, -1.25021555, 2.10699958, 3.05084608, -2.22214376,
-1.45072221, 1.28913747, 4.35106213, 0.6689982 , 0.95669662,
-2.45171913, -1.11410444, -0.17960956, 2.76494217, -1.29609865,
-2.22612429, 1.13247453, -0.48015254, 1.48508549, -0.23812 ,
-2.51476072, -1.27015583, -1.46777697, -2.49699318, -1.29992892,
-2.32263069, 1.39348459, -0.89482132, -1.23715363, 0.58483655,
-1.25415329, -1.49653039, -1.7181055 , -1.45719072, -2.01791949,
3.41437156, -0.1733581 , -2.22765605, -1.57426046])
np.testing.assert_allclose(results.null,
[ 38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143,
38.42857143, 38.42857143, 38.42857143, 38.42857143, 38.42857143])
self.assertAlmostEqual(results.D2, .388656011675)
self.assertAlmostEqual(results.adj_D2, 0.36207583826952761)
class TestBinomial(unittest.TestCase):
    """
    Tests for Binomial GLM
    """
def setUp(self):
#London house price data
#y: 'BATH2'
y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.y = y.reshape((316,1))
#X: 'FLOORSZ'
X = np.array([ 77, 75, 64, 95, 107, 100, 81, 151, 98, 260, 171, 161, 91,
80, 50, 85, 52, 69, 60, 84, 155, 97, 69, 126, 90, 43,
51, 41, 140, 80, 52, 86, 66, 60, 40, 155, 138, 97, 115,
148, 206, 60, 53, 96, 88, 160, 31, 43, 154, 60, 131, 60,
46, 61, 125, 150, 76, 92, 96, 100, 105, 72, 48, 41, 72,
65, 60, 65, 98, 33, 144, 111, 91, 108, 38, 48, 95, 63,
98, 129, 108, 51, 131, 66, 48, 127, 76, 68, 52, 64, 57,
121, 67, 76, 112, 96, 90, 53, 93, 64, 97, 58, 44, 157,
53, 70, 71, 167, 47, 70, 96, 77, 75, 71, 67, 47, 71,
90, 69, 64, 65, 95, 60, 60, 65, 54, 121, 105, 50, 85,
69, 69, 62, 65, 93, 93, 70, 62, 155, 68, 117, 80, 80,
75, 98, 114, 86, 70, 50, 51, 163, 124, 59, 95, 51, 63,
85, 53, 46, 102, 114, 83, 47, 40, 63, 123, 100, 63, 110,
79, 98, 99, 120, 52, 48, 37, 81, 30, 88, 50, 35, 116,
67, 45, 80, 86, 109, 59, 75, 60, 71, 141, 121, 50, 168,
90, 51, 133, 75, 133, 127, 37, 68, 105, 61, 123, 151, 110,
77, 220, 94, 77, 70, 100, 98, 126, 55, 105, 60, 176, 104,
68, 62, 70, 48, 102, 80, 97, 66, 80, 102, 160, 55, 60,
71, 125, 85, 85, 190, 137, 48, 41, 42, 51, 57, 60, 114,
88, 84, 108, 66, 85, 42, 98, 90, 127, 100, 55, 76, 82,
63, 80, 71, 76, 121, 109, 92, 160, 109, 185, 100, 90, 90,
86, 88, 95, 116, 135, 61, 74, 60, 235, 76, 66, 100, 49,
50, 37, 100, 88, 90, 52, 95, 81, 79, 96, 75, 91, 86,
83, 180, 108, 80, 96, 49, 117, 117, 86, 46, 66, 95, 57,
120, 137, 68, 240])
self.X = X.reshape((316,1))
def testIWLS(self):
model = GLM(self.y, self.X, family=Binomial())
results = model.fit()
self.assertAlmostEqual(results.n, 316)
self.assertAlmostEqual(results.df_model, 1)
self.assertAlmostEqual(results.df_resid, 314)
self.assertAlmostEqual(results.aic, 155.19347530342466)
self.assertAlmostEqual(results.bic, -1656.1095797628657)
self.assertAlmostEqual(results.deviance, 151.19347530342466)
self.assertAlmostEqual(results.llf, -75.596737651712331)
self.assertAlmostEqual(results.null_deviance, 189.16038985881212)
self.assertAlmostEqual(results.scale, 1.0)
np.testing.assert_allclose(results.params, [-5.33638276, 0.0287754 ])
np.testing.assert_allclose(results.bse, [ 0.64499904, 0.00518312],
atol=1.0e-8)
np.testing.assert_allclose(results.cov_params(),
[[ 4.16023762e-01, -3.14338457e-03],
[ -3.14338457e-03, 2.68646833e-05]])
np.testing.assert_allclose(results.tvalues, [-8.27347396, 5.55175826])
np.testing.assert_allclose(results.pvalues, [ 1.30111233e-16,
2.82810512e-08])
np.testing.assert_allclose(results.conf_int(),
[[-6.60055765, -4.07220787],
[ 0.01861668, 0.03893412]], atol=1.0e-8)
np.testing.assert_allclose(results.normalized_cov_params,
[[ 4.16023762e-01, -3.14338457e-03],
[ -3.14338457e-03, 2.68646833e-05]])
np.testing.assert_allclose(results.mu,
[ 0.04226237, 0.03999333, 0.02946178, 0.0689636 , 0.09471181,
0.07879431, 0.04717464, 0.27065598, 0.07471691, 0.89522144,
0.39752487, 0.33102718, 0.06192993, 0.04589793, 0.01988679,
0.0526265 , 0.02104007, 0.03386636, 0.02634295, 0.05121018,
0.29396682, 0.07275173, 0.03386636, 0.15307528, 0.06027915,
0.01631789, 0.02045547, 0.01541937, 0.2128508 , 0.04589793,
0.02104007, 0.05407977, 0.0311527 , 0.02634295, 0.01498855,
0.29396682, 0.20336776, 0.07275173, 0.11637537, 0.25395607,
0.64367488, 0.02634295, 0.02164101, 0.07083428, 0.05710047,
0.32468619, 0.01160845, 0.01631789, 0.28803008, 0.02634295,
0.17267234, 0.02634295, 0.01776301, 0.02709115, 0.14938186,
0.26501331, 0.04111287, 0.06362285, 0.07083428, 0.07879431,
0.08989109, 0.03680743, 0.0187955 , 0.01541937, 0.03680743,
0.03029581, 0.02634295, 0.03029581, 0.07471691, 0.01228768,
0.23277197, 0.10505173, 0.06192993, 0.09720799, 0.01416217,
0.0187955 , 0.0689636 , 0.02865003, 0.07471691, 0.16460503,
0.09720799, 0.02045547, 0.17267234, 0.0311527 , 0.0187955 ,
0.15684317, 0.04111287, 0.03293737, 0.02104007, 0.02946178,
0.02421701, 0.1353385 , 0.03203302, 0.04111287, 0.10778798,
0.07083428, 0.06027915, 0.02164101, 0.06535882, 0.02946178,
0.07275173, 0.02490638, 0.01678627, 0.30605146, 0.02164101,
0.03482061, 0.03580075, 0.37030921, 0.0182721 , 0.03482061,
0.07083428, 0.04226237, 0.03999333, 0.03580075, 0.03203302,
0.0182721 , 0.03580075, 0.06027915, 0.03386636, 0.02946178,
0.03029581, 0.0689636 , 0.02634295, 0.02634295, 0.03029581,
0.02225873, 0.1353385 , 0.08989109, 0.01988679, 0.0526265 ,
0.03386636, 0.03386636, 0.02786 , 0.03029581, 0.06535882,
0.06535882, 0.03482061, 0.02786 , 0.29396682, 0.03293737,
0.12242534, 0.04589793, 0.04589793, 0.03999333, 0.07471691,
0.11344884, 0.05407977, 0.03482061, 0.01988679, 0.02045547,
0.34389327, 0.14576223, 0.02561486, 0.0689636 , 0.02045547,
0.02865003, 0.0526265 , 0.02164101, 0.01776301, 0.08307425,
0.11344884, 0.04982997, 0.0182721 , 0.01498855, 0.02865003,
0.14221564, 0.07879431, 0.02865003, 0.10237696, 0.04465416,
0.07471691, 0.07673078, 0.13200634, 0.02104007, 0.0187955 ,
0.01376599, 0.04717464, 0.01128289, 0.05710047, 0.01988679,
0.01300612, 0.11936722, 0.03203302, 0.01726786, 0.04589793,
0.05407977, 0.09976271, 0.02561486, 0.03999333, 0.02634295,
0.03580075, 0.21771181, 0.1353385 , 0.01988679, 0.37704374,
0.06027915, 0.02045547, 0.18104935, 0.03999333, 0.18104935,
0.15684317, 0.01376599, 0.03293737, 0.08989109, 0.02709115,
0.14221564, 0.27065598, 0.10237696, 0.04226237, 0.72991785,
0.06713876, 0.04226237, 0.03482061, 0.07879431, 0.07471691,
0.15307528, 0.02289366, 0.08989109, 0.02634295, 0.43243779,
0.08756457, 0.03293737, 0.02786 , 0.03482061, 0.0187955 ,
0.08307425, 0.04589793, 0.07275173, 0.0311527 , 0.04589793,
0.08307425, 0.32468619, 0.02289366, 0.02634295, 0.03580075,
0.14938186, 0.0526265 , 0.0526265 , 0.53268924, 0.19874565,
0.0187955 , 0.01541937, 0.01586237, 0.02045547, 0.02421701,
0.02634295, 0.11344884, 0.05710047, 0.05121018, 0.09720799,
0.0311527 , 0.0526265 , 0.01586237, 0.07471691, 0.06027915,
0.15684317, 0.07879431, 0.02289366, 0.04111287, 0.04848506,
0.02865003, 0.04589793, 0.03580075, 0.04111287, 0.1353385 ,
0.09976271, 0.06362285, 0.32468619, 0.09976271, 0.49676673,
0.07879431, 0.06027915, 0.06027915, 0.05407977, 0.05710047,
0.0689636 , 0.11936722, 0.18973955, 0.02709115, 0.03890304,
0.02634295, 0.80625182, 0.04111287, 0.0311527 , 0.07879431,
0.0193336 , 0.01988679, 0.01376599, 0.07879431, 0.05710047,
0.06027915, 0.02104007, 0.0689636 , 0.04717464, 0.04465416,
0.07083428, 0.03999333, 0.06192993, 0.05407977, 0.04982997,
0.46087756, 0.09720799, 0.04589793, 0.07083428, 0.0193336 ,
0.12242534, 0.12242534, 0.05407977, 0.01776301, 0.0311527 ,
0.0689636 , 0.02421701, 0.13200634, 0.19874565, 0.03293737,
0.82774282], atol=1.0e-8)
self.assertAlmostEqual(results.pearson_chi2, 271.21110541713801)
np.testing.assert_allclose(results.resid_response,
[-0.04226237, -0.03999333, -0.02946178, -0.0689636 , -0.09471181,
-0.07879431, -0.04717464, -0.27065598, -0.07471691, 0.10477856,
-0.39752487, 0.66897282, -0.06192993, -0.04589793, -0.01988679,
-0.0526265 , -0.02104007, -0.03386636, -0.02634295, -0.05121018,
-0.29396682, 0.92724827, -0.03386636, -0.15307528, -0.06027915,
-0.01631789, -0.02045547, -0.01541937, -0.2128508 , -0.04589793,
-0.02104007, -0.05407977, -0.0311527 , -0.02634295, -0.01498855,
-0.29396682, 0.79663224, -0.07275173, -0.11637537, 0.74604393,
-0.64367488, -0.02634295, -0.02164101, -0.07083428, -0.05710047,
-0.32468619, -0.01160845, -0.01631789, -0.28803008, -0.02634295,
-0.17267234, -0.02634295, -0.01776301, -0.02709115, 0.85061814,
0.73498669, -0.04111287, -0.06362285, -0.07083428, -0.07879431,
0.91010891, -0.03680743, -0.0187955 , -0.01541937, -0.03680743,
-0.03029581, -0.02634295, -0.03029581, -0.07471691, -0.01228768,
0.76722803, -0.10505173, -0.06192993, -0.09720799, -0.01416217,
-0.0187955 , -0.0689636 , -0.02865003, -0.07471691, -0.16460503,
-0.09720799, -0.02045547, 0.82732766, -0.0311527 , -0.0187955 ,
-0.15684317, -0.04111287, -0.03293737, -0.02104007, -0.02946178,
-0.02421701, -0.1353385 , -0.03203302, -0.04111287, -0.10778798,
-0.07083428, -0.06027915, -0.02164101, -0.06535882, -0.02946178,
-0.07275173, -0.02490638, -0.01678627, -0.30605146, -0.02164101,
-0.03482061, -0.03580075, 0.62969079, -0.0182721 , -0.03482061,
-0.07083428, -0.04226237, -0.03999333, -0.03580075, -0.03203302,
-0.0182721 , -0.03580075, -0.06027915, -0.03386636, -0.02946178,
-0.03029581, -0.0689636 , -0.02634295, -0.02634295, -0.03029581,
-0.02225873, -0.1353385 , -0.08989109, -0.01988679, -0.0526265 ,
-0.03386636, -0.03386636, -0.02786 , -0.03029581, -0.06535882,
-0.06535882, -0.03482061, -0.02786 , -0.29396682, -0.03293737,
-0.12242534, -0.04589793, -0.04589793, -0.03999333, -0.07471691,
-0.11344884, -0.05407977, -0.03482061, -0.01988679, -0.02045547,
0.65610673, 0.85423777, -0.02561486, -0.0689636 , -0.02045547,
-0.02865003, -0.0526265 , -0.02164101, -0.01776301, -0.08307425,
-0.11344884, -0.04982997, -0.0182721 , -0.01498855, -0.02865003,
-0.14221564, -0.07879431, -0.02865003, -0.10237696, -0.04465416,
-0.07471691, -0.07673078, -0.13200634, -0.02104007, -0.0187955 ,
-0.01376599, -0.04717464, -0.01128289, 0.94289953, -0.01988679,
-0.01300612, -0.11936722, -0.03203302, -0.01726786, -0.04589793,
-0.05407977, -0.09976271, -0.02561486, -0.03999333, -0.02634295,
-0.03580075, -0.21771181, 0.8646615 , -0.01988679, 0.62295626,
-0.06027915, -0.02045547, -0.18104935, 0.96000667, -0.18104935,
-0.15684317, -0.01376599, -0.03293737, -0.08989109, -0.02709115,
-0.14221564, 0.72934402, -0.10237696, -0.04226237, -0.72991785,
-0.06713876, -0.04226237, -0.03482061, -0.07879431, -0.07471691,
-0.15307528, 0.97710634, 0.91010891, -0.02634295, -0.43243779,
-0.08756457, -0.03293737, -0.02786 , -0.03482061, -0.0187955 ,
0.91692575, -0.04589793, -0.07275173, -0.0311527 , -0.04589793,
-0.08307425, 0.67531381, -0.02289366, -0.02634295, -0.03580075,
-0.14938186, -0.0526265 , -0.0526265 , 0.46731076, -0.19874565,
-0.0187955 , -0.01541937, -0.01586237, -0.02045547, -0.02421701,
-0.02634295, -0.11344884, -0.05710047, -0.05121018, -0.09720799,
0.9688473 , -0.0526265 , -0.01586237, -0.07471691, -0.06027915,
-0.15684317, -0.07879431, -0.02289366, -0.04111287, -0.04848506,
-0.02865003, -0.04589793, -0.03580075, -0.04111287, -0.1353385 ,
-0.09976271, -0.06362285, 0.67531381, -0.09976271, -0.49676673,
-0.07879431, -0.06027915, -0.06027915, -0.05407977, -0.05710047,
-0.0689636 , -0.11936722, -0.18973955, -0.02709115, -0.03890304,
-0.02634295, 0.19374818, -0.04111287, -0.0311527 , -0.07879431,
-0.0193336 , -0.01988679, -0.01376599, -0.07879431, 0.94289953,
-0.06027915, -0.02104007, -0.0689636 , -0.04717464, -0.04465416,
0.92916572, -0.03999333, -0.06192993, -0.05407977, -0.04982997,
-0.46087756, -0.09720799, -0.04589793, -0.07083428, -0.0193336 ,
-0.12242534, -0.12242534, -0.05407977, -0.01776301, -0.0311527 ,
-0.0689636 , -0.02421701, -0.13200634, -0.19874565, -0.03293737,
-0.82774282], atol=1.0e-8)
np.testing.assert_allclose(results.resid_working,
[ -1.71062283e-03, -1.53549840e-03, -8.42423701e-04,
-4.42798906e-03, -8.12073047e-03, -5.71934606e-03,
-2.12046213e-03, -5.34278480e-02, -5.16550074e-03,
9.82823035e-03, -9.52067472e-02, 1.48142818e-01,
-3.59779501e-03, -2.00993083e-03, -3.87619325e-04,
-2.62379729e-03, -4.33370579e-04, -1.10808799e-03,
-6.75670103e-04, -2.48818484e-03, -6.10129090e-02,
6.25511612e-02, -1.10808799e-03, -1.98451739e-02,
-3.41454749e-03, -2.61928659e-04, -4.09867263e-04,
-2.34090923e-04, -3.56621577e-02, -2.00993083e-03,
-4.33370579e-04, -2.76645832e-03, -9.40257152e-04,
-6.75670103e-04, -2.21289369e-04, -6.10129090e-02,
1.29061842e-01, -4.90775251e-03, -1.19671283e-02,
1.41347263e-01, -1.47631680e-01, -6.75670103e-04,
-4.58198217e-04, -4.66208406e-03, -3.07429001e-03,
-7.11923401e-02, -1.33191898e-04, -2.61928659e-04,
-5.90659690e-02, -6.75670103e-04, -2.46673839e-02,
-6.75670103e-04, -3.09919962e-04, -7.14047519e-04,
1.08085429e-01, 1.43161630e-01, -1.62077632e-03,
-3.79032977e-03, -4.66208406e-03, -5.71934606e-03,
7.44566288e-02, -1.30492035e-03, -3.46630910e-04,
-2.34090923e-04, -1.30492035e-03, -8.90029618e-04,
-6.75670103e-04, -8.90029618e-04, -5.16550074e-03,
-1.49131762e-04, 1.37018624e-01, -9.87652847e-03,
-3.59779501e-03, -8.53083698e-03, -1.97726627e-04,
-3.46630910e-04, -4.42798906e-03, -7.97307494e-04,
-5.16550074e-03, -2.26348718e-02, -8.53083698e-03,
-4.09867263e-04, 1.18189219e-01, -9.40257152e-04,
-3.46630910e-04, -2.07414715e-02, -1.62077632e-03,
-1.04913757e-03, -4.33370579e-04, -8.42423701e-04,
-5.72261321e-04, -1.58375811e-02, -9.93244730e-04,
-1.62077632e-03, -1.03659408e-02, -4.66208406e-03,
-3.41454749e-03, -4.58198217e-04, -3.99257703e-03,
-8.42423701e-04, -4.90775251e-03, -6.04877746e-04,
-2.77048947e-04, -6.50004229e-02, -4.58198217e-04,
-1.17025566e-03, -1.23580799e-03, 1.46831486e-01,
-3.27769165e-04, -1.17025566e-03, -4.66208406e-03,
-1.71062283e-03, -1.53549840e-03, -1.23580799e-03,
-9.93244730e-04, -3.27769165e-04, -1.23580799e-03,
-3.41454749e-03, -1.10808799e-03, -8.42423701e-04,
-8.90029618e-04, -4.42798906e-03, -6.75670103e-04,
-6.75670103e-04, -8.90029618e-04, -4.84422741e-04,
-1.58375811e-02, -7.35405096e-03, -3.87619325e-04,
-2.62379729e-03, -1.10808799e-03, -1.10808799e-03,
-7.54555329e-04, -8.90029618e-04, -3.99257703e-03,
-3.99257703e-03, -1.17025566e-03, -7.54555329e-04,
-6.10129090e-02, -1.04913757e-03, -1.31530576e-02,
-2.00993083e-03, -2.00993083e-03, -1.53549840e-03,
-5.16550074e-03, -1.14104800e-02, -2.76645832e-03,
-1.17025566e-03, -3.87619325e-04, -4.09867263e-04,
1.48037813e-01, 1.06365931e-01, -6.39314594e-04,
-4.42798906e-03, -4.09867263e-04, -7.97307494e-04,
-2.62379729e-03, -4.58198217e-04, -3.09919962e-04,
-6.32800839e-03, -1.14104800e-02, -2.35929680e-03,
-3.27769165e-04, -2.21289369e-04, -7.97307494e-04,
-1.73489362e-02, -5.71934606e-03, -7.97307494e-04,
-9.40802551e-03, -1.90495384e-03, -5.16550074e-03,
-5.43585191e-03, -1.51253748e-02, -4.33370579e-04,
-3.46630910e-04, -1.86893696e-04, -2.12046213e-03,
-1.25867293e-04, 5.07657192e-02, -3.87619325e-04,
-1.66959104e-04, -1.25477263e-02, -9.93244730e-04,
-2.93030065e-04, -2.00993083e-03, -2.76645832e-03,
-8.95970087e-03, -6.39314594e-04, -1.53549840e-03,
-6.75670103e-04, -1.23580799e-03, -3.70792339e-02,
1.01184411e-01, -3.87619325e-04, 1.46321062e-01,
-3.41454749e-03, -4.09867263e-04, -2.68442736e-02,
3.68583645e-02, -2.68442736e-02, -2.07414715e-02,
-1.86893696e-04, -1.04913757e-03, -7.35405096e-03,
-7.14047519e-04, -1.73489362e-02, 1.43973473e-01,
-9.40802551e-03, -1.71062283e-03, -1.43894386e-01,
-4.20497779e-03, -1.71062283e-03, -1.17025566e-03,
-5.71934606e-03, -5.16550074e-03, -1.98451739e-02,
2.18574168e-02, 7.44566288e-02, -6.75670103e-04,
-1.06135519e-01, -6.99614755e-03, -1.04913757e-03,
-7.54555329e-04, -1.17025566e-03, -3.46630910e-04,
6.98449121e-02, -2.00993083e-03, -4.90775251e-03,
-9.40257152e-04, -2.00993083e-03, -6.32800839e-03,
1.48072729e-01, -5.12120512e-04, -6.75670103e-04,
-1.23580799e-03, -1.89814939e-02, -2.62379729e-03,
-2.62379729e-03, 1.16328328e-01, -3.16494123e-02,
-3.46630910e-04, -2.34090923e-04, -2.47623705e-04,
-4.09867263e-04, -5.72261321e-04, -6.75670103e-04,
-1.14104800e-02, -3.07429001e-03, -2.48818484e-03,
-8.53083698e-03, 2.92419496e-02, -2.62379729e-03,
-2.47623705e-04, -5.16550074e-03, -3.41454749e-03,
-2.07414715e-02, -5.71934606e-03, -5.12120512e-04,
-1.62077632e-03, -2.23682205e-03, -7.97307494e-04,
-2.00993083e-03, -1.23580799e-03, -1.62077632e-03,
-1.58375811e-02, -8.95970087e-03, -3.79032977e-03,
1.48072729e-01, -8.95970087e-03, -1.24186489e-01,
-5.71934606e-03, -3.41454749e-03, -3.41454749e-03,
-2.76645832e-03, -3.07429001e-03, -4.42798906e-03,
-1.25477263e-02, -2.91702648e-02, -7.14047519e-04,
-1.45456868e-03, -6.75670103e-04, 3.02653681e-02,
-1.62077632e-03, -9.40257152e-04, -5.71934606e-03,
-3.66561274e-04, -3.87619325e-04, -1.86893696e-04,
-5.71934606e-03, 5.07657192e-02, -3.41454749e-03,
-4.33370579e-04, -4.42798906e-03, -2.12046213e-03,
-1.90495384e-03, 6.11546973e-02, -1.53549840e-03,
-3.59779501e-03, -2.76645832e-03, -2.35929680e-03,
-1.14513988e-01, -8.53083698e-03, -2.00993083e-03,
-4.66208406e-03, -3.66561274e-04, -1.31530576e-02,
-1.31530576e-02, -2.76645832e-03, -3.09919962e-04,
-9.40257152e-04, -4.42798906e-03, -5.72261321e-04,
-1.51253748e-02, -3.16494123e-02, -1.04913757e-03,
-1.18023417e-01])
np.testing.assert_allclose(results.resid_pearson,
[-0.21006498, -0.20410641, -0.17423009, -0.27216147, -0.3234511 ,
-0.29246179, -0.22250903, -0.60917574, -0.28416602, 0.3421141 ,
-0.81229277, 1.42158361, -0.25694055, -0.21933056, -0.142444 ,
-0.23569027, -0.14660243, -0.18722578, -0.16448609, -0.2323235 ,
-0.64526275, 3.57006696, -0.18722578, -0.42513819, -0.25327023,
-0.12879668, -0.14450826, -0.12514332, -0.5200069 , -0.21933056,
-0.14660243, -0.23910582, -0.17931646, -0.16448609, -0.12335569,
-0.64526275, 1.97919183, -0.28010679, -0.36290807, 1.71396874,
-1.3440334 , -0.16448609, -0.14872695, -0.27610555, -0.24608613,
-0.69339243, -0.1083734 , -0.12879668, -0.63604537, -0.16448609,
-0.45684893, -0.16448609, -0.13447767, -0.16686977, 2.3862634 ,
1.66535145, -0.20706426, -0.26066405, -0.27610555, -0.29246179,
3.18191348, -0.19548397, -0.13840353, -0.12514332, -0.19548397,
-0.17675498, -0.16448609, -0.17675498, -0.28416602, -0.11153719,
1.81550268, -0.34261205, -0.25694055, -0.32813846, -0.11985666,
-0.13840353, -0.27216147, -0.17174127, -0.28416602, -0.44389026,
-0.32813846, -0.14450826, 2.18890738, -0.17931646, -0.13840353,
-0.43129917, -0.20706426, -0.18455132, -0.14660243, -0.17423009,
-0.1575374 , -0.39562855, -0.18191506, -0.20706426, -0.34757708,
-0.27610555, -0.25327023, -0.14872695, -0.26444152, -0.17423009,
-0.28010679, -0.15982038, -0.13066317, -0.66410018, -0.14872695,
-0.189939 , -0.19269154, 1.30401147, -0.13642648, -0.189939 ,
-0.27610555, -0.21006498, -0.20410641, -0.19269154, -0.18191506,
-0.13642648, -0.19269154, -0.25327023, -0.18722578, -0.17423009,
-0.17675498, -0.27216147, -0.16448609, -0.16448609, -0.17675498,
-0.15088226, -0.39562855, -0.3142763 , -0.142444 , -0.23569027,
-0.18722578, -0.18722578, -0.169288 , -0.17675498, -0.26444152,
-0.26444152, -0.189939 , -0.169288 , -0.64526275, -0.18455132,
-0.3735026 , -0.21933056, -0.21933056, -0.20410641, -0.28416602,
-0.35772404, -0.23910582, -0.189939 , -0.142444 , -0.14450826,
1.38125991, 2.42084442, -0.16213645, -0.27216147, -0.14450826,
-0.17174127, -0.23569027, -0.14872695, -0.13447767, -0.30099975,
-0.35772404, -0.22900483, -0.13642648, -0.12335569, -0.17174127,
-0.4071783 , -0.29246179, -0.17174127, -0.33771794, -0.21619749,
-0.28416602, -0.28828407, -0.38997712, -0.14660243, -0.13840353,
-0.11814455, -0.22250903, -0.10682532, 4.06361781, -0.142444 ,
-0.11479334, -0.36816723, -0.18191506, -0.1325567 , -0.21933056,
-0.23910582, -0.33289374, -0.16213645, -0.20410641, -0.16448609,
-0.19269154, -0.52754269, 2.52762346, -0.142444 , 1.28538406,
-0.25327023, -0.14450826, -0.47018591, 4.89940505, -0.47018591,
-0.43129917, -0.11814455, -0.18455132, -0.3142763 , -0.16686977,
-0.4071783 , 1.64156241, -0.33771794, -0.21006498, -1.6439517 ,
-0.26827373, -0.21006498, -0.189939 , -0.29246179, -0.28416602,
-0.42513819, 6.53301013, 3.18191348, -0.16448609, -0.87288109,
-0.30978696, -0.18455132, -0.169288 , -0.189939 , -0.13840353,
3.32226189, -0.21933056, -0.28010679, -0.17931646, -0.21933056,
-0.30099975, 1.44218477, -0.1530688 , -0.16448609, -0.19269154,
-0.41906522, -0.23569027, -0.23569027, 0.93662539, -0.4980393 ,
-0.13840353, -0.12514332, -0.12695686, -0.14450826, -0.1575374 ,
-0.16448609, -0.35772404, -0.24608613, -0.2323235 , -0.32813846,
5.57673284, -0.23569027, -0.12695686, -0.28416602, -0.25327023,
-0.43129917, -0.29246179, -0.1530688 , -0.20706426, -0.22573357,
-0.17174127, -0.21933056, -0.19269154, -0.20706426, -0.39562855,
-0.33289374, -0.26066405, 1.44218477, -0.33289374, -0.99355423,
-0.29246179, -0.25327023, -0.25327023, -0.23910582, -0.24608613,
-0.27216147, -0.36816723, -0.48391225, -0.16686977, -0.20119082,
-0.16448609, 0.49021146, -0.20706426, -0.17931646, -0.29246179,
-0.14040923, -0.142444 , -0.11814455, -0.29246179, 4.06361781,
-0.25327023, -0.14660243, -0.27216147, -0.22250903, -0.21619749,
3.6218033 , -0.20410641, -0.25694055, -0.23910582, -0.22900483,
-0.92458976, -0.32813846, -0.21933056, -0.27610555, -0.14040923,
-0.3735026 , -0.3735026 , -0.23910582, -0.13447767, -0.17931646,
-0.27216147, -0.1575374 , -0.38997712, -0.4980393 , -0.18455132,
-2.19209332])
np.testing.assert_allclose(results.resid_anscombe,
[-0.31237627, -0.3036605 , -0.25978208, -0.40240831, -0.47552289,
-0.43149255, -0.33053793, -0.85617194, -0.41962951, 0.50181328,
-1.0954382 , 1.66940149, -0.38048321, -0.3259044 , -0.21280762,
-0.34971301, -0.21896842, -0.27890356, -0.2454118 , -0.34482158,
-0.90063409, 2.80452413, -0.27890356, -0.61652596, -0.37518169,
-0.19255932, -0.2158664 , -0.18713159, -0.74270558, -0.3259044 ,
-0.21896842, -0.35467084, -0.2672722 , -0.2454118 , -0.18447466,
-0.90063409, 2.05763941, -0.41381347, -0.53089521, 1.88552083,
-1.60654218, -0.2454118 , -0.22211425, -0.40807333, -0.3647888 ,
-0.95861559, -0.16218047, -0.19255932, -0.88935802, -0.2454118 ,
-0.65930821, -0.2454118 , -0.20099345, -0.24892975, 2.28774016,
1.85167195, -0.30798858, -0.38585584, -0.40807333, -0.43149255,
2.65398426, -0.2910267 , -0.20681747, -0.18713159, -0.2910267 ,
-0.26350118, -0.2454118 , -0.26350118, -0.41962951, -0.16689207,
1.95381191, -0.50251231, -0.38048321, -0.48214234, -0.17927213,
-0.20681747, -0.40240831, -0.25611424, -0.41962951, -0.64189694,
-0.48214234, -0.2158664 , 2.18071204, -0.2672722 , -0.20681747,
-0.62488429, -0.30798858, -0.27497271, -0.21896842, -0.25978208,
-0.23514749, -0.57618899, -0.27109582, -0.30798858, -0.50947546,
-0.40807333, -0.37518169, -0.22211425, -0.39130036, -0.25978208,
-0.41381347, -0.2385213 , -0.19533116, -0.92350689, -0.22211425,
-0.28288904, -0.28692985, 1.5730846 , -0.20388497, -0.28288904,
-0.40807333, -0.31237627, -0.3036605 , -0.28692985, -0.27109582,
-0.20388497, -0.28692985, -0.37518169, -0.27890356, -0.25978208,
-0.26350118, -0.40240831, -0.2454118 , -0.2454118 , -0.26350118,
-0.22530448, -0.57618899, -0.46253505, -0.21280762, -0.34971301,
-0.27890356, -0.27890356, -0.25249702, -0.26350118, -0.39130036,
-0.39130036, -0.28288904, -0.25249702, -0.90063409, -0.27497271,
-0.5456246 , -0.3259044 , -0.3259044 , -0.3036605 , -0.41962951,
-0.52366614, -0.35467084, -0.28288904, -0.21280762, -0.2158664 ,
1.63703418, 2.30570989, -0.24194253, -0.40240831, -0.2158664 ,
-0.25611424, -0.34971301, -0.22211425, -0.20099345, -0.44366892,
-0.52366614, -0.33999576, -0.20388497, -0.18447466, -0.25611424,
-0.59203547, -0.43149255, -0.25611424, -0.49563627, -0.32133344,
-0.41962951, -0.42552227, -0.56840788, -0.21896842, -0.20681747,
-0.17672552, -0.33053793, -0.15987433, 2.9768074 , -0.21280762,
-0.17173916, -0.53821445, -0.27109582, -0.19814236, -0.3259044 ,
-0.35467084, -0.48884654, -0.24194253, -0.3036605 , -0.2454118 ,
-0.28692985, -0.75249089, 2.35983933, -0.21280762, 1.55726719,
-0.37518169, -0.2158664 , -0.67712261, 3.23165236, -0.67712261,
-0.62488429, -0.17672552, -0.27497271, -0.46253505, -0.24892975,
-0.59203547, 1.83482464, -0.49563627, -0.31237627, -1.83652534,
-0.39681759, -0.31237627, -0.28288904, -0.43149255, -0.41962951,
-0.61652596, 3.63983609, 2.65398426, -0.2454118 , -1.16171662,
-0.45616505, -0.27497271, -0.25249702, -0.28288904, -0.20681747,
2.71015945, -0.3259044 , -0.41381347, -0.2672722 , -0.3259044 ,
-0.44366892, 1.68567947, -0.22853969, -0.2454118 , -0.28692985,
-0.60826548, -0.34971301, -0.34971301, 1.2290223 , -0.71397735,
-0.20681747, -0.18713159, -0.1898263 , -0.2158664 , -0.23514749,
-0.2454118 , -0.52366614, -0.3647888 , -0.34482158, -0.48214234,
3.41271513, -0.34971301, -0.1898263 , -0.41962951, -0.37518169,
-0.62488429, -0.43149255, -0.22853969, -0.30798858, -0.3352348 ,
-0.25611424, -0.3259044 , -0.28692985, -0.30798858, -0.57618899,
-0.48884654, -0.38585584, 1.68567947, -0.48884654, -1.28709718,
-0.43149255, -0.37518169, -0.37518169, -0.35467084, -0.3647888 ,
-0.40240831, -0.53821445, -0.69534436, -0.24892975, -0.29939131,
-0.2454118 , 0.70366797, -0.30798858, -0.2672722 , -0.43149255,
-0.2097915 , -0.21280762, -0.17672552, -0.43149255, 2.9768074 ,
-0.37518169, -0.21896842, -0.40240831, -0.33053793, -0.32133344,
2.82351017, -0.3036605 , -0.38048321, -0.35467084, -0.33999576,
-1.21650102, -0.48214234, -0.3259044 , -0.40807333, -0.2097915 ,
-0.5456246 , -0.5456246 , -0.35467084, -0.20099345, -0.2672722 ,
-0.40240831, -0.23514749, -0.56840788, -0.71397735, -0.27497271,
-2.18250381])
np.testing.assert_allclose(results.resid_deviance,
[-0.29387552, -0.2857098 , -0.24455876, -0.37803944, -0.44609851,
-0.40514674, -0.31088148, -0.79449324, -0.39409528, 0.47049798,
-1.00668653, 1.48698001, -0.35757692, -0.30654405, -0.20043547,
-0.32882173, -0.20622595, -0.26249995, -0.23106769, -0.32424676,
-0.83437766, 2.28941155, -0.26249995, -0.57644334, -0.35262564,
-0.18139734, -0.20331052, -0.17629229, -0.69186337, -0.30654405,
-0.20622595, -0.33345774, -0.251588 , -0.23106769, -0.17379306,
-0.83437766, 1.78479093, -0.38867448, -0.4974393 , 1.65565332,
-1.43660134, -0.23106769, -0.20918228, -0.38332275, -0.34291558,
-0.88609006, -0.15281596, -0.18139734, -0.82428104, -0.23106769,
-0.61571821, -0.23106769, -0.18932865, -0.234371 , 1.94999969,
1.62970871, -0.2897651 , -0.36259328, -0.38332275, -0.40514674,
2.19506559, -0.27386827, -0.19480442, -0.17629229, -0.27386827,
-0.24804925, -0.23106769, -0.24804925, -0.39409528, -0.15725009,
1.7074519 , -0.47114617, -0.35757692, -0.4522457 , -0.16889886,
-0.19480442, -0.37803944, -0.24111595, -0.39409528, -0.59975102,
-0.4522457 , -0.20331052, 1.87422489, -0.251588 , -0.19480442,
-0.5841272 , -0.2897651 , -0.25881274, -0.20622595, -0.24455876,
-0.22142749, -0.53929061, -0.25517563, -0.2897651 , -0.47760126,
-0.38332275, -0.35262564, -0.20918228, -0.36767536, -0.24455876,
-0.38867448, -0.2245965 , -0.18400413, -0.85481866, -0.20918228,
-0.26623785, -0.27002708, 1.40955093, -0.19204738, -0.26623785,
-0.38332275, -0.29387552, -0.2857098 , -0.27002708, -0.25517563,
-0.19204738, -0.27002708, -0.35262564, -0.26249995, -0.24455876,
-0.24804925, -0.37803944, -0.23106769, -0.23106769, -0.24804925,
-0.21218006, -0.53929061, -0.43402996, -0.20043547, -0.32882173,
-0.26249995, -0.26249995, -0.23772023, -0.24804925, -0.36767536,
-0.36767536, -0.26623785, -0.23772023, -0.83437766, -0.25881274,
-0.51106408, -0.30654405, -0.30654405, -0.2857098 , -0.39409528,
-0.49074728, -0.33345774, -0.26623785, -0.20043547, -0.20331052,
1.46111186, 1.96253843, -0.22780971, -0.37803944, -0.20331052,
-0.24111595, -0.32882173, -0.20918228, -0.18932865, -0.41648237,
-0.49074728, -0.31973217, -0.19204738, -0.17379306, -0.24111595,
-0.55389988, -0.40514674, -0.24111595, -0.46476893, -0.30226435,
-0.39409528, -0.39958581, -0.53211065, -0.20622595, -0.19480442,
-0.16650295, -0.31088148, -0.15064545, 2.39288231, -0.20043547,
-0.16181126, -0.5042114 , -0.25517563, -0.18664773, -0.30654405,
-0.33345774, -0.45846897, -0.22780971, -0.2857098 , -0.23106769,
-0.27002708, -0.7007597 , 1.99998811, -0.20043547, 1.39670618,
-0.35262564, -0.20331052, -0.63203077, 2.53733821, -0.63203077,
-0.5841272 , -0.16650295, -0.25881274, -0.43402996, -0.234371 ,
-0.55389988, 1.61672923, -0.46476893, -0.29387552, -1.61804148,
-0.37282386, -0.29387552, -0.26623785, -0.40514674, -0.39409528,
-0.57644334, 2.74841605, 2.19506559, -0.23106769, -1.06433539,
-0.42810736, -0.25881274, -0.23772023, -0.26623785, -0.19480442,
2.23070414, -0.30654405, -0.38867448, -0.251588 , -0.30654405,
-0.41648237, 1.49993075, -0.21521982, -0.23106769, -0.27002708,
-0.5688444 , -0.32882173, -0.32882173, 1.12233423, -0.66569789,
-0.19480442, -0.17629229, -0.17882689, -0.20331052, -0.22142749,
-0.23106769, -0.49074728, -0.34291558, -0.32424676, -0.4522457 ,
2.63395309, -0.32882173, -0.17882689, -0.39409528, -0.35262564,
-0.5841272 , -0.40514674, -0.21521982, -0.2897651 , -0.3152773 ,
-0.24111595, -0.30654405, -0.27002708, -0.2897651 , -0.53929061,
-0.45846897, -0.36259328, 1.49993075, -0.45846897, -1.17192274,
-0.40514674, -0.35262564, -0.35262564, -0.33345774, -0.34291558,
-0.37803944, -0.5042114 , -0.64869028, -0.234371 , -0.28170899,
-0.23106769, 0.65629132, -0.2897651 , -0.251588 , -0.40514674,
-0.19760028, -0.20043547, -0.16650295, -0.40514674, 2.39288231,
-0.35262564, -0.20622595, -0.37803944, -0.31088148, -0.30226435,
2.30104857, -0.2857098 , -0.35757692, -0.33345774, -0.31973217,
-1.11158678, -0.4522457 , -0.30654405, -0.38332275, -0.19760028,
-0.51106408, -0.51106408, -0.33345774, -0.18932865, -0.251588 ,
-0.37803944, -0.22142749, -0.53211065, -0.66569789, -0.25881274,
-1.87550882])
np.testing.assert_allclose(results.null,
[ 0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759, 0.08860759, 0.08860759, 0.08860759, 0.08860759,
0.08860759])
        self.assertAlmostEqual(results.D2, 0.200712816165)
self.assertAlmostEqual(results.adj_D2, 0.19816731557930456)
if __name__ == '__main__':
unittest.main()
|
TaylorOshan/pysal
|
pysal/contrib/glm/tests/test_glm.py
|
Python
|
bsd-3-clause
| 67,888
|
[
"COLUMBUS",
"Gaussian"
] |
bce2a91db44c26edecd69400768880cf3c52be778861b27f2c9c8b3a5dc94503
|
"""testCount3.py.
Written by: Brian O'Dell, October 2017
A program to run each program a 500 times per thread count.
Then uses the data collected to make graphs and tables that
are useful to evaluate the programs running time.
"""
from subprocess import *
from numba import jit
import numpy as np
import csv
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt
@jit
def doCount(name):
"""Do multiple executions of the program.
Call the C program multiple times with variable arguments to gather data
The name of the executable should exist before running
"""
j = 0
while (j < 1025):
for i in range(0, 501):
call([name, "-t", str(j), "-w"])
if (j == 0):
j = 1
else:
j = 2*j
@jit
def exportData(name):
"""Turn the data into something meaningful.
Takes all the data gets the average and standard deviation for each
number of threads. Then plots a graph based on it. Also, makes
a csv with the avg and stddev
"""
DF = pd.read_csv(name + ".csv")
f = {'ExecTime': ['mean', 'std']}
# group by the number of threads in the csv and
# apply the mean and standard deviation functions to the groups
avgDF = DF.groupby('NumThreads').agg(f)
avgTable = DF.groupby('NumThreads', as_index=False).agg(f)
    # When the data csv was saved we used 0 to indicate serial execution,
    # so that the rows would sort in numerical order instead of alphabetical.
    # Now rename index 0 to 'Serial' to be an accurate representation.
indexList = avgDF.index.tolist()
indexList[0] = 'Serial'
avgDF.index = indexList
# make the bar chart and set the axes
avgPlot = avgDF.plot(kind='bar', title=('Run Times Using ' + name),
legend='False', figsize=(15, 8))
avgPlot.set_xlabel("Number of Threads")
avgPlot.set_ylabel("Run Time (seconds)")
# put the data values on top of the bars for clarity
avgPlot.legend(['mean', 'std deviation'])
for p in avgPlot.patches:
avgPlot.annotate((str(p.get_height())[:6]),
(p.get_x()-.01, p.get_height()), fontsize=9)
# save the files we need
plt.savefig(name + 'Graph.png')
avgTable.to_csv(name + 'Table.csv', index=False, encoding='utf-8')
def main():
"""Do both functions."""
doCount("./count3")
exportData("ompRuntimes")
if __name__ == '__main__':
main()
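# Illustrative note (assumption, not verified against the C program): the "-w"
# flag is taken to append a row per run to the csv later read by exportData
# (here "ompRuntimes.csv"), shaped like:
#   NumThreads,ExecTime
#   0,1.2043      <- 0 marks the serial run
#   2,0.6120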
|
brian-o/CS-CourseWork
|
CS491/count3openMP/testCount3.py
|
Python
|
gpl-3.0
| 2,463
|
[
"Brian"
] |
80536cb4fc47bfe07980ed7c1b14744c97268a9a83c893f37452f98b8d88c980
|
"""High level summaries of samples and programs with MultiQC.
https://github.com/ewels/MultiQC
"""
import collections
import glob
import mimetypes
import os
import pandas as pd
import shutil
import pybedtools
import toolz as tz
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.bam import ref
from bcbio.structural import annotate
from bcbio.variation import bedutils
def summary(*samples):
"""Summarize all quality metrics together"""
samples = utils.unpack_worlds(samples)
work_dir = dd.get_work_dir(samples[0])
multiqc = config_utils.get_program("multiqc", samples[0]["config"])
if not multiqc:
logger.debug("multiqc not found. Update bcbio_nextgen.py tools to fix this issue.")
out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "multiqc"))
out_data = os.path.join(out_dir, "multiqc_data")
out_file = os.path.join(out_dir, "multiqc_report.html")
samples = _report_summary(samples, os.path.join(out_dir, "report"))
if not utils.file_exists(out_file):
with tx_tmpdir(samples[0], work_dir) as tx_out:
in_files = _get_input_files(samples, out_dir, tx_out)
in_files += _merge_metrics(samples, out_dir)
if _one_exists(in_files):
with utils.chdir(out_dir):
_create_config_file(out_dir, samples)
input_list_file = _create_list_file(in_files, out_dir)
if dd.get_tmp_dir(samples[0]):
export_tmp = "export TMPDIR=%s &&" % dd.get_tmp_dir(samples[0])
else:
export_tmp = ""
path_export = utils.local_path_export()
cmd = "{path_export}{export_tmp} {multiqc} -f -l {input_list_file} -o {tx_out}"
do.run(cmd.format(**locals()), "Run multiqc")
if utils.file_exists(os.path.join(tx_out, "multiqc_report.html")):
shutil.move(os.path.join(tx_out, "multiqc_report.html"), out_file)
shutil.move(os.path.join(tx_out, "multiqc_data"), out_data)
out = []
for i, data in enumerate(_group_by_samplename(samples)):
if i == 0:
if utils.file_exists(out_file):
data_files = glob.glob(os.path.join(out_dir, "multiqc_data", "*.txt"))
data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.bed"))
data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.txt"))
data_files += glob.glob(os.path.join(out_dir, "report", "*", "*.tsv"))
data_files += glob.glob(os.path.join(out_dir, "report", "*.R*"))
if "summary" not in data:
data["summary"] = {}
data["summary"]["multiqc"] = {"base": out_file, "secondary": data_files}
out.append([data])
return out
def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False
def _get_input_files(samples, base_dir, tx_out_dir):
"""Retrieve input files, keyed by sample and QC method name.
Stages files into the work directory to ensure correct names for
MultiQC sample assessment when running with CWL.
"""
in_files = collections.defaultdict(list)
for data in samples:
sum_qc = tz.get_in(["summary", "qc"], data, {})
if sum_qc in [None, "None"]:
sum_qc = {}
elif isinstance(sum_qc, basestring):
sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
elif not isinstance(sum_qc, dict):
raise ValueError("Unexpected summary qc: %s" % sum_qc)
for program, pfiles in sum_qc.items():
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles["secondary"]
# CWL: presents output files as single file plus associated secondary files
elif isinstance(pfiles, basestring):
if os.path.exists(pfiles):
pfiles = [os.path.join(os.path.dirname(pfiles), x) for x in os.listdir(os.path.dirname(pfiles))]
else:
pfiles = []
in_files[(dd.get_sample_name(data), program)].extend(pfiles)
staged_files = []
for (sample, program), files in in_files.items():
cur_dir = utils.safe_makedir(os.path.join(tx_out_dir, sample, program))
for f in files:
if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
if _in_temp_directory(f):
staged_f = os.path.join(cur_dir, os.path.basename(f))
shutil.copy(f, staged_f)
staged_files.append(staged_f)
else:
staged_files.append(f)
# Back compatible -- to migrate to explicit specifications in input YAML
staged_files += ["trimmed", "htseq-count/*summary"]
# Add in created target_info file
staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
return sorted(list(set(staged_files)))
def _in_temp_directory(f):
return any(x.startswith("tmp") for x in f.split("/"))
def _group_by_samplename(samples):
"""Group samples split by QC method back into a single sample.
"""
out = collections.defaultdict(list)
for data in samples:
out[(dd.get_sample_name(data), dd.get_align_bam(data))].append(data)
return [xs[0] for xs in out.values()]
def _create_list_file(dirs, out_dir):
out_file = os.path.join(out_dir, "list_files.txt")
with open(out_file, "w") as f:
f.write('\n'.join(dirs))
return out_file
def _create_config_file(out_dir, samples):
"""Provide configuration file hiding duplicate columns.
Future entry point for providing top level configuration of output reports.
"""
out_file = os.path.join(out_dir, "multiqc_config.yaml")
out = {"table_columns_visible":
{"SnpEff": {"Change_rate": False,
"Ts_Tv_ratio": False,
"Number_of_variants_before_filter": False},
"samtools": {"error_rate": False}},
"module_order": ["bcbio", "samtools", "bcftools", "picard", "qualimap", "snpeff", "fastqc"]}
with open(out_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
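# For reference, the YAML written above comes out roughly as (a sketch derived
# from the dict literal, not captured output):
#   module_order: [bcbio, samtools, bcftools, picard, qualimap, snpeff, fastqc]
#   table_columns_visible:
#     SnpEff: {Change_rate: false, Number_of_variants_before_filter: false, Ts_Tv_ratio: false}
#     samtools: {error_rate: false}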
def _check_multiqc_input(path):
"""Check if file exists, and return empty if it doesn't"""
if utils.file_exists(path):
return path
## report and coverage
def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True
def _report_summary(samples, out_dir):
"""
Run coverage report with bcbiocov package
"""
try:
import bcbreport.prepare as bcbreport
except ImportError:
logger.info("skipping report. No bcbreport installed.")
return samples
# samples = utils.unpack_worlds(samples)
work_dir = dd.get_work_dir(samples[0])
parent_dir = utils.safe_makedir(out_dir)
with utils.chdir(parent_dir):
logger.info("copy qsignature")
qsignature_fn = os.path.join(work_dir, "qc", "qsignature", "qsignature.ma")
        if qsignature_fn:  # this needs to be inside the summary/qc dict
if utils.file_exists(qsignature_fn) and not utils.file_exists("qsignature.ma"):
shutil.copy(qsignature_fn, "bcbio_qsignature.ma")
out_dir = utils.safe_makedir("fastqc")
logger.info("summarize fastqc")
with utils.chdir(out_dir):
_merge_fastqc(samples)
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
samples = _merge_target_information(samples)
out_dir = utils.safe_makedir("coverage")
logger.info("summarize coverage")
for data in samples:
pfiles = tz.get_in(["summary", "qc", "coverage"], data, [])
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles["secondary"]
elif pfiles:
pfiles = [pfiles]
for fn in pfiles:
if os.path.basename(fn).find("coverage_fixed") > -1:
utils.copy_plus(fn, os.path.join(out_dir, os.path.basename(fn)))
out_dir = utils.safe_makedir("variants")
logger.info("summarize variants")
for data in samples:
pfiles = tz.get_in(["summary", "qc", "variants"], data, [])
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles["secondary"]
elif pfiles:
pfiles = [pfiles]
for fn in pfiles:
if os.path.basename(fn).find("gc-depth-parse.tsv") > -1:
utils.copy_plus(fn, os.path.join(out_dir, os.path.basename(fn)))
bcbreport.report(parent_dir)
out_report = os.path.join(parent_dir, "qc-coverage-report.html")
if not utils.file_exists(out_report):
rmd_file = os.path.join(parent_dir, "report-ready.Rmd")
run_file = "%s-run.R" % (os.path.splitext(out_report)[0])
with open(run_file, "w") as out_handle:
out_handle.write("""library(rmarkdown)\nrender("%s")\n""" % rmd_file)
# cmd = "%s %s" % (utils.Rscript_cmd(), run_file)
# Skip automated generation of coverage report to avoid error
# messages. We need to generalize coverage reporting and re-include.
# try:
# do.run(cmd, "Prepare coverage summary", log_error=False)
# except subprocess.CalledProcessError as msg:
# logger.info("Skipping generation of coverage report: %s" % (str(msg)))
if utils.file_exists("report-ready.html"):
shutil.move("report-ready.html", out_report)
return samples
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
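# For reference, the expected stats file is tab-separated with a fixed header
# (values below are hypothetical):
#   sample  unique species A pairs  unique species B pairs  ambiguous pairs
#   s1      123456                  2345                    678
# and _parse_disambiguate sums columns 2-4 over all non-header rows.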
def _add_disambiguate(sample):
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
sample["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
sample["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
sample["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return sample
def _fix_duplicated_rate(dt):
"""Get RNA duplicated rate if exists and replace by samtools metric"""
if "Duplication_Rate_of_Mapped" in dt:
dt["Duplicates_pct"] = 100.0 * dt["Duplication_Rate_of_Mapped"]
return dt
def _merge_metrics(samples, out_dir):
"""Merge metrics from multiple QC steps
"""
logger.info("summarize metrics")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
sample_metrics = collections.defaultdict(dict)
for s in samples:
s = _add_disambiguate(s)
m = tz.get_in(['summary', 'metrics'], s)
if m:
for me in m.keys():
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
out = []
for sample_name, m in sample_metrics.items():
sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
with file_transaction(samples[0], sample_file) as tx_out_file:
dt = pd.DataFrame(m, index=['1'])
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = sample_name
dt['rRNA_rate'] = m.get('rRNA_rate', "NA")
dt = _fix_duplicated_rate(dt)
dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
out.append(sample_file)
return out
def _merge_fastqc(samples):
"""
merge all fastqc samples into one by module
"""
fastqc_list = collections.defaultdict(list)
seen = set()
for data in samples:
name = dd.get_sample_name(data)
if name in seen:
continue
seen.add(name)
fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*")
for fn in fns:
if fn.endswith("tsv"):
metric = os.path.basename(fn)
fastqc_list[metric].append([name, fn])
for metric in fastqc_list:
dt_by_sample = []
for fn in fastqc_list[metric]:
dt = pd.read_csv(fn[1], sep="\t")
dt['sample'] = fn[0]
dt_by_sample.append(dt)
dt = utils.rbind(dt_by_sample)
dt.to_csv(metric, sep="\t", index=False, mode ='w')
return samples
def _merge_target_information(samples):
metrics_dir = utils.safe_makedir("metrics")
out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
if utils.file_exists(out_file):
return samples
genomes = set(dd.get_genome_build(data) for data in samples)
coverage_beds = set(dd.get_coverage(data) for data in samples)
original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)
data = samples[0]
info = {}
# Reporting in MultiQC only if the genome is the same across all samples
if len(genomes) == 1:
info["genome_info"] = {
"name": dd.get_genome_build(data),
"size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
}
# Reporting in MultiQC only if the target is the same across all samples
vcr_orig = None
if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
vcr_orig = list(original_variant_regions)[0]
vcr_clean = bedutils.clean_file(vcr_orig, data)
info["variants_regions_info"] = {
"bed": vcr_orig,
"size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
"regions": pybedtools.BedTool(vcr_clean).count(),
}
gene_num = annotate.count_genes(vcr_clean, data)
if gene_num is not None:
info["variants_regions_info"]["genes"] = gene_num
else:
info["variants_regions_info"] = {
"bed": "callable regions",
}
# Reporting in MultiQC only if the target is the same across samples
if len(coverage_beds) == 1:
cov_bed = list(coverage_beds)[0]
if cov_bed is not None:
if vcr_orig and vcr_orig == cov_bed:
info["coverage_bed_info"] = info["variants_regions_info"]
else:
clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
info["coverage_bed_info"] = {
"bed": cov_bed,
"size": pybedtools.BedTool(cov_bed).total_coverage(),
"regions": pybedtools.BedTool(clean_bed).count(),
}
gene_num = annotate.count_genes(clean_bed, data)
if gene_num is not None:
info["coverage_bed_info"]["genes"] = gene_num
else:
info["coverage_bed_info"] = info["variants_regions_info"]
coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
if len(coverage_intervals) == 1:
info["coverage_interval"] = list(coverage_intervals)[0]
if info:
with open(out_file, "w") as out_handle:
yaml.safe_dump(info, out_handle)
return samples
|
brainstorm/bcbio-nextgen
|
bcbio/qc/multiqc.py
|
Python
|
mit
| 16,977
|
[
"HTSeq"
] |
e498f7ac31504ac5479fcd5ed3ad871225cf415ab98730226df09f66056faed6
|
import os
import csv
from paraview.simple import *
import xml.etree.ElementTree as ET
def ShowThresholdData(Data, ColourMapRange = None, NegativeThresholdValues = None, PositiveThresholdValues = None, ColourMapLegend = 'Sigma', BackgroundOpacityValue = 0.1, ScalarBarPos = None):
# create positive and negative thresholds of given data
##### DEFAULTS
DefaultColorMap = 0
    DefaultScalarBarPosition = 0
# get data range, used in a few places
DataRange = Data.CellData[0].GetRange(0)
print "Data range : " + str(DataRange)
# check if we should do negative or positive thresholds
if NegativeThresholdValues is None:
print "Using Default Negative Threshold"
Do_Neg_Thres = True
NegativeThresholdValues = [DataRange[0], DataRange[0]/2]
else:
Do_Neg_Thres = any(NegativeThresholdValues)
if PositiveThresholdValues is None:
print "Using Default Positive Threshold"
Do_Pos_Thres = True
PositiveThresholdValues = [DataRange[1]/2, DataRange[1]]
else:
Do_Pos_Thres = any(PositiveThresholdValues)
#### CHECK INPUTS
    if ScalarBarPos is None:
        print "Using Default ScalarBar Position"
        DefaultScalarBarPosition = 1
    if ColourMapRange is None:
        DefaultColorMap = 1
print "Using default colormaps"
DataMax = round(max(abs(i) for i in DataRange))
print "Data max : " + str(DataMax)
ColourMapRange = [-DataMax, DataMax]
### ACTUAL CODE
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
    # rename the source so it is labelled clearly in the pipeline browser
RenameSource('Data', Data)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
Data_Display_Background = Show(Data, renderView1)
# show color bar/color legend
Data_Display_Background.SetScalarBarVisibility(renderView1, True)
# find the name of the array
CellData = Data.CellData[0]
ColourMapName = CellData.Name
#### SET DEFAULT COLORMAPS AND STUFF
print "Setting colourmap for data name: " + ColourMapName
# get color transfer function/color map for ColourMapName
# THIS IS THE DEFAULT PARAVIEW COLOURSCHEME WE KNOW AND LOVE
uLUT = GetColorTransferFunction(ColourMapName)
uLUT.RGBPoints = [-1, 0.231373, 0.298039, 0.752941, 0, 0.865003, 0.865003, 0.865003, 1, 0.705882, 0.0156863, 0.14902]
uLUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for ColourMapName
uPWF = GetOpacityTransferFunction(ColourMapName)
uPWF.Points = [-1, 0.0, 0.5, 0.0, 1, 1.0, 0.5, 0.0]
uPWF.ScalarRangeInitialized = 1
#### CHANGE SCALE TO DATA
# Rescale transfer function
uLUT.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
# Rescale transfer function
uPWF.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
### RENAME STUFF
print "Setting colourmap legend text to : " + ColourMapLegend
# get color legend/bar for uLUT in view renderView1
uLUTColorBar = GetScalarBar(uLUT, renderView1)
# Properties modified on uLUTColorBar
uLUTColorBar.Title = ColourMapLegend
#### CREATE THRESHOLDS
if Do_Neg_Thres:
print "Showing negative threshold with range :" + str(NegativeThresholdValues)
# create a new 'Threshold'
Negative_Threshold = Threshold(Input=Data)
# Properties modified on threshold1
Negative_Threshold.ThresholdRange = NegativeThresholdValues
# show data in view
Negative_Threshold_Display = Show(Negative_Threshold, renderView1)
# show color bar/color legend
Negative_Threshold_Display.SetScalarBarVisibility(renderView1, True)
#Rename it to something nicer
RenameSource('Negative_Threshold', Negative_Threshold)
if Do_Pos_Thres:
print "Showing positive threshold with range :" + str(PositiveThresholdValues)
# create a new 'Threshold'
Positive_Threshold = Threshold(Input=Data)
# Properties modified on threshold1
Positive_Threshold.ThresholdRange = PositiveThresholdValues
# show data in view
Positive_Threshold_Display = Show(Positive_Threshold, renderView1)
# show color bar/color legend
Positive_Threshold_Display.SetScalarBarVisibility(renderView1, True)
# Rename it to something nicer
RenameSource('Positive_Threshold', Positive_Threshold)
### MAKE BACKGROUND TRANSPARENT
print "Showing background with opacity : " + str(BackgroundOpacityValue)
# turn off scalar coloring
ColorBy(Data_Display_Background, None)
    # Properties modified on the background display
Data_Display_Background.Opacity = BackgroundOpacityValue
### DISPLAY DATA IN SCENE WITH TIMESTEPS
    if not DefaultScalarBarPosition:
scalarbar = GetScalarBar(uLUT)
scalarbar.Position = ScalarBarPos
# reset view to fit data
renderView1.ResetCamera()
# Make it create the scene
Render()
# Get the animation time steps - this does nothing if only 1 file loaded
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
def ShowSliceData(Data, DirectionString, Centre = None, ColourMapRange = None, ColourMapLegend = 'SigmaProbably', ScalarBarPos = None):
# make slice in Data of given direction
#### DEFAULTS
DefaultCentre = 0
DefaultColorMap = 0
    DefaultScalarBarPosition = 0
SliceNormal = [0.0, 0.0, 0.0]
#### CHECK INPUTS
    if Centre is None:
print "Using Default Centre"
DefaultCentre = 1
    if ColourMapRange is None:
DefaultColorMap = 1
print "Using default colormaps"
DataRange = Data.CellData[0].GetRange(0)
print "Data range : " + str(DataRange)
DataMax = round(max(abs(i) for i in DataRange))
print "Data max : " + str(DataMax)
ColourMapRange = [-DataMax, DataMax]
LegitStrings = ['x', 'y', 'z']
camDirection = 1
DirectionString = DirectionString.lower()
if DirectionString.startswith('-'):
camDirection = -1
DirectionString = DirectionString[1]
#print "negative direction"
if DirectionString in LegitStrings:
dimIdx = LegitStrings.index(DirectionString)
SliceNormal[dimIdx] = 1.0
else:
print "DONT UNDERSTAND INPUT"
return
    if ScalarBarPos is None:
        print "Using Default ScalarBar Position"
        DefaultScalarBarPosition = 1
###### CODE STARTS HERE
    # rename the source to something useful
RenameSource('Data', Data)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
Data_Display = Show(Data, renderView1)
Render()
# find the name of the array
CellData = Data.CellData[0]
ColourMapName = CellData.Name
print "ColourMapName data is: " + ColourMapName
uLUT = GetColorTransferFunction(ColourMapName)
uLUT.RGBPoints = [-1, 0.231373, 0.298039, 0.752941, 0, 0.865003, 0.865003, 0.865003, 1, 0.705882, 0.0156863,
0.14902]
uLUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for ColourMapName
uPWF = GetOpacityTransferFunction(ColourMapName)
uPWF.Points = [-1, 0.0, 0.5, 0.0, 1, 1.0, 0.5, 0.0]
uPWF.ScalarRangeInitialized = 1
#### CHANGE SCALE TO DATA
# Rescale transfer function
uLUT.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
# Rescale transfer function
uPWF.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
### RENAME STUFF
print "Setting colourmap legend text to : " + ColourMapLegend
# reset view to fit data
renderView1.ResetCamera()
# show color bar/color legend
Data_Display.SetScalarBarVisibility(renderView1, True)
if DefaultCentre == 1:
bounds = Data.GetDataInformation().GetBounds()
bounds_dx = bounds[1] - bounds[0]
bounds_dy = bounds[3] - bounds[2]
bounds_dz = bounds[5] - bounds[4]
bounds_cx = (bounds[0] + bounds[1]) / 2.0
bounds_cy = (bounds[2] + bounds[3]) / 2.0
bounds_cz = (bounds[4] + bounds[5]) / 2.0
Centre = [bounds_cx, bounds_cy, bounds_cz]
print "Centre of Slice :" + str(Centre)
# create a new 'Slice'
slice1 = Slice(Input=Data)
slice1.SliceType = 'Plane'
slice1.SliceOffsetValues = [0.0]
# init the 'Plane' selected for 'SliceType'
slice1.SliceType.Origin = Centre
print "Slice Normal : " + str(SliceNormal)
# set direction of slice
slice1.SliceType.Normal = SliceNormal
# # show data in view
slice1Display = Show(slice1, renderView1)
# # trace defaults for the display properties.
slice1Display.ColorArrayName = ['CELLS', ColourMapName]
slice1Display.LookupTable = uLUT
# hide data in view
Hide(Data, renderView1)
# set active source to get rid of the stuff in the
SetActiveSource(Data)
if camDirection == -1:
DirectionString = '-' + DirectionString
SetCamera(Data, DirectionString)
if DefaultColorMap == 1:
SliceRange = slice1.CellData[0].GetRange(0)
print "Slice range : " + str(SliceRange)
SliceMax = round(max(abs(i) for i in SliceRange))
print "Slice max : " + str(SliceMax)
uLUT.RescaleTransferFunction(-SliceMax, SliceMax)
uPWF.RescaleTransferFunction(-SliceMax, SliceMax)
else:
# Rescale transfer function
uLUT.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
# Rescale transfer function
uPWF.RescaleTransferFunction(ColourMapRange[0], ColourMapRange[1])
# get color legend/bar for uLUT in view renderView1
uLUTColorBar = GetScalarBar(uLUT, renderView1)
# Properties modified on uLUTColorBar
uLUTColorBar.Title = ColourMapLegend
    if not DefaultScalarBarPosition:
scalarbar = GetScalarBar(uLUT)
scalarbar.Position = ScalarBarPos
# reset view to fit data
renderView1.ResetCamera()
Render()
# Get the animation time steps - this does nothing if only 1 file loaded
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
def ShowSphere(Centre, Radius = None, Name = 'ExPosition'):
# create a sphere source at a given position
if Radius is None:
Radius = 5
sphere1 = Sphere()
sphere1.Center = Centre
sphere1.Radius = Radius
# Properties modified on sphere1
sphere1.ThetaResolution = 16
sphere1.PhiResolution = 16
RenameSource(Name, sphere1)
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
sphere1Display = Show(sphere1, renderView1)
# reset view to fit data
renderView1.ResetCamera()
Render()
def ShowSphereCSV(CSVfile, Radius = None, TimePoint = None, Name_prefix = None):
    # show a sphere at the position read from a csv file for a given timepoint
# convert to absolute path
CSVfile = os.path.abspath(CSVfile)
if TimePoint is None:
animationScene1 = GetAnimationScene()
TimePoint = int(animationScene1.AnimationTime)
print "Timepoint from Animation step value is: " + str(TimePoint)
else:
print "User Set timepoint " + str(TimePoint)
if Name_prefix is None:
Name_prefix = 'ExPos_'
# read the specific line from the csv file
Centre = [0.0, 0.0, 0.0]
count = 0
with open(CSVfile) as f:
r = csv.reader(f)
for row in r:
#print "Current row :" + str(row)
if count == TimePoint:
#print "found it"
Centre = [float(i) for i in row]
break
count += 1
print "Pos is now : " + str(Centre)
# make sphere with this centre
ShowSphere(Centre, Radius, Name_prefix + str(TimePoint))
def ShowSphereCSVClip(Data,CSVfile, Radius = None, Name = None):
CSVfile = ConvertFilenames(CSVfile)
CSVfile = CSVfile.replace('\\', '/')
print "filename is " + str(CSVfile)
if Radius is None:
Radius = 5
if Name is None:
Name = 'IdealPosition'
# create a new 'Programmable Filter'
programmableFilter1 = ProgrammableFilter(Input=Data)
RenameSource(Name, programmableFilter1)
programmableFilter1.RequestInformationScript = ''
programmableFilter1.RequestUpdateExtentScript = ''
programmableFilter1.PythonPath = ''
# Properties modified on programmableFilter1
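    # The embedded script below runs inside the Programmable Filter at every
    # timestep: it reads the CSV row matching the current time to get a sphere
    # centre, builds a vtkSphere of the requested radius, and uses
    # vtkClipDataSet with InsideOutOn to keep only the data inside the sphere.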
programmableFilter1.Script = 'csvfilename = \'' + CSVfile + '\'\nimport vtk\nimport csv\ninput = self.GetInputDataObject(0, 0)\noutput = self.GetOutputDataObject(0)\n\nt = self.GetInputDataObject(0,0).GetInformation().Get(vtk.vtkDataObject.DATA_TIME_STEP())\nTimePoint = int(t)\n # read the specific line from the csv file\nCentre = [0.0, 0.0, 0.0]\ncount = 0\n\nwith open(csvfilename) as f:\n r = csv.reader(f)\n for row in r:\n #print "Current row :" + str(row)\n if count == TimePoint:\n # print "found it"\n Centre = [float(i) for i in row]\n break\n count += 1\n\n #print "Pos is now : " + str(Centre)\n\n\ns = vtk.vtkSphere()\ns.SetCenter(Centre)\ns.SetRadius(' + str(Radius) + ')\n\nclip = vtk.vtkClipDataSet()\nclip.SetInputDataObject(input)\nclip.SetClipFunction(s)\nclip.SetValue(0.0)\nclip.InsideOutOn()\nclip.Update()\n#print clip\n\noutput.ShallowCopy(clip.GetOutputDataObject(0))\n'
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
programmableFilter1Display = Show(programmableFilter1, renderView1)
# turn off scalar coloring
ColorBy(programmableFilter1Display, None)
Render()
def LoadCameraFile(CameraFilename):
# heavily based on the code posted here https://www.mail-archive.com/paraview@paraview.org/msg20341.html
# convert file name to absolute path in linuxy format
CameraFileNameAbs = os.path.abspath(CameraFilename)
print "Loading camera file : " + CameraFileNameAbs
# initialise variables
CamPosition = [0.0, 0.0, 0.0]
CamFocus = [0.0, 0.0, 0.0]
CamViewUp = [0.0, 0.0, 0.0]
CamParallelScale = 0.0
CamCentreofRot = [0.0, 0.0, 0.0]
CamViewAngle = 0
# use XML parser to read attributes in file
tree = ET.parse(CameraFileNameAbs)
root = tree.getroot()
# get the attributes stored in the file
for child in root[0]:
if child.attrib['name'] == 'CameraPosition':
for subChild in child:
CamPosition[int(subChild.attrib['index'])] = float(subChild.attrib['value'])
if child.attrib['name'] == 'CameraViewUp':
for subChild in child:
CamViewUp[int(subChild.attrib['index'])] = float(subChild.attrib['value'])
if child.attrib['name'] == 'CameraParallelScale':
CamParallelScale = float(child[0].attrib['value'])
if child.attrib['name'] == 'CameraFocalPoint':
for subChild in child:
CamFocus[int(subChild.attrib['index'])] = float(subChild.attrib['value'])
if child.attrib['name'] == 'CenterOfRotation':
for subChild in child:
CamCentreofRot[int(subChild.attrib['index'])] = float(subChild.attrib['value'])
if child.attrib['name'] == 'CameraViewAngle':
CamViewAngle = float(child[0].attrib['value'])
print "CameraPosition is now: " + str(CamPosition)
print "CameraViewUp is now: " + str(CamViewUp)
print "CameraFocus: " + str(CamFocus)
print "CameraParallelScale is now: " + str(CamParallelScale)
print "CameraCentreOfRotation is now: " + str(CamCentreofRot)
print "CameraViewAngle is now: " + str(CamViewAngle)
# set the positions
view = GetRenderView()
view.CameraViewUp = CamViewUp
view.CameraPosition = CamPosition
view.CameraFocalPoint = CamFocus
view.CameraParallelScale = CamParallelScale
view.CenterOfRotation = CamCentreofRot
view.CameraViewAngle = CamViewAngle
Render()
#view.ResetCamera()
#ResetCamera()
def SetCamera(Data, DirectionString):
# This code is heavily influenced by (stolen from) http://comments.gmane.org/gmane.comp.science.paraview.user/15091
LegitStrings = ['x', 'y', 'z']
camDirection = 1
DirectionString = DirectionString.lower()
if DirectionString.startswith('-'):
camDirection = -1
DirectionString = DirectionString[1]
print "negative direction"
if DirectionString in LegitStrings:
dimMode = LegitStrings.index(DirectionString)
else:
print "DONT UNDERSTAND INPUT"
return
bounds = Data.GetDataInformation().GetBounds()
bounds_dx = bounds[1] - bounds[0]
bounds_dy = bounds[3] - bounds[2]
bounds_dz = bounds[5] - bounds[4]
bounds_cx = (bounds[0] + bounds[1]) / 2.0
bounds_cy = (bounds[2] + bounds[3]) / 2.0
bounds_cz = (bounds[4] + bounds[5]) / 2.0
if dimMode == 2:
# xy Z equivalent
camUp = [0.0, 1.0, 0.0]
#pos = max(bounds_dx, bounds_dy)
pos = bounds_cz + camDirection * bounds_dz
camPos = [bounds_cx, bounds_cy, pos]
camFoc = [bounds_cx, bounds_cy, bounds_cz]
elif dimMode == 1:
# xz Y equivalent
camUp = [0.0, 0.0, 1.0]
# pos = 2* max(bounds_dx, bounds_dz) #make it twice as far away to ensure we are
pos = bounds_cy + camDirection * bounds_dy
camPos = [bounds_cx, pos, bounds_cz]
camFoc = [bounds_cx, bounds_cy, bounds_cz]
elif dimMode == 0:
# yz - X equivalent
camUp = [0.0, 0.0, 1.0]
#pos = max(bounds_dy, bounds_dz)
pos = bounds_cx + camDirection * bounds_dx
camPos = [pos, bounds_cy, bounds_cz] # changed to match the GUI buttons
camFoc = [bounds_cx, bounds_cy, bounds_cz]
else:
print "What?"
# configure the view
# width = 1024
# height = int(width*aspect)
print "Position set! : " + str(camPos)
view = GetRenderView()
view.CameraViewUp = camUp
view.CameraPosition = camPos
view.CameraFocalPoint = camFoc
# view.UseOffscreenRenderingForScreenshots = 0
# view.CenterAxesVisibility = 0
# view.OrientationAxesVisibility = 0
# view.ViewSize = [width, height]
Render()
view.ResetCamera()
ResetCamera()
    # for fine tuning; not exposing this as an input at the moment since there are already too many
config_camZoom = 1.0
cam = GetActiveCamera()
cam.Zoom(config_camZoom)
print "Position after camera reset : " + str(camPos)
def ConvertFilenames(Filenames_input):
if type(Filenames_input) == list:
full_filenames = Filenames_input
for iName in range(len(Filenames_input)):
# print iName
full_filenames[iName] = os.path.abspath(Filenames_input[iName])
print full_filenames
else:
full_filenames = os.path.abspath(Filenames_input)
return full_filenames
def SaveAnimation(OutputFilename, FrameRateVal, MagnificationVal = 1.0, OrientationAxisVisible = 0):
# Ensure output in correct format
view = GetRenderView()
view.OrientationAxesVisibility = OrientationAxisVisible
Render()
OutputFilename=ConvertFilenames(OutputFilename)
# Create file based on magnification and FrameRate
WriteAnimation(OutputFilename, Magnification=MagnificationVal, FrameRate=FrameRateVal, Compression=True)
view.ResetCamera()
Render()
# Make it create the scene
Render()
def SaveGif(PngName, FrameRate):
# graphicsmagick uses "ticks" of 10ms when making gif, so we need to convert from frame rate to this
gif_delay = int(round((1.0 / FrameRate) * 100.0, 0)) # rounding to nearest int
# get the full path of where the pngs have been saved
fullpath_out = ConvertFilenames(PngName)
path_out = os.path.dirname(fullpath_out)
filename = os.path.splitext(os.path.basename(fullpath_out))[0]
# paraview names all the files like example.0001.png etc. so we need the glob string to get this: example*.png
glob_string = os.path.join(path_out, filename) + "*.png"
gif_string = os.path.join(path_out, filename) + ".gif"
# create string
graphicsmagick_string = "gm convert -delay " + str(gif_delay) + " " + glob_string + " " + gif_string
print "Making Gif using string: "
print graphicsmagick_string
os.system(graphicsmagick_string) # make the .gif
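    # e.g. FrameRate=25 -> gif_delay = round(100/25.0) = 4 ticks (40 ms/frame);
    # FrameRate=10 -> 10 ticks (100 ms/frame).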
def SaveVideo(PngName, FrameRate):
# get the full path of where the pngs have been saved
fullpath_out = ConvertFilenames(PngName)
path_out = os.path.dirname(fullpath_out)
filename = os.path.splitext(os.path.basename(fullpath_out))[0]
    # paraview names the files like example.0001.png etc.; ffmpeg needs C-style formatting like example.%4d.png since globbing only works on linux
list_string = os.path.join(path_out, filename) + ".%4d.png"
mp4_string = os.path.join(path_out, filename) + ".mp4"
# -c:v libx264 makes sure it works on older players
# -vf \"fps=25,format=yuv420p\" specifies a 25fps frame rate anyway - this makes it smooth for slower frame rates and fixes bugs with first frames and stuff
    # -y auto-yesses overwriting files etc.
ffmpeg_string = "ffmpeg -framerate " + str(FrameRate) + " -i " + list_string + " -c:v libx264 -vf \"fps=25,format=yuv420p\" " + " " + mp4_string + " -y"
print "Making Video using string: "
print ffmpeg_string
os.system(ffmpeg_string)
|
EIT-team/Reconstruction
|
src/python/ParaviewLoad/ShowData.py
|
Python
|
gpl-3.0
| 21,754
|
[
"ParaView",
"VTK"
] |
7d4f0affc3bfbf2e0cbd908492c90b43741b5c9d7e790931686b5473fd005f9d
|
import subprocess
import sys
import os
from collections import defaultdict
from scipy.interpolate import UnivariateSpline
from scipy.signal import fftconvolve
import numpy as np
import DataStructures
import Units
import RotBroad
import MakeModel_v2 as MakeModel
import FindContinuum
import FitsUtils
import FittingUtilities  # provides ReduceResolution/RebinData used below
class Resid:
    def __init__(self, size):
        # store the arrays on the instance (originally they were assigned to
        # locals and silently discarded)
        self.wave = np.zeros(size)
        self.rect = np.zeros(size)
        self.opt = np.zeros(size)
        self.recterr = np.zeros(size)
        self.opterr = np.zeros(size)
        self.cont = np.zeros(size)
"""
#This function rebins (x,y) data onto the grid given by the array xgrid
def RebinData(data,xgrid):
Model = UnivariateSpline(data.x, data.y, s=0)
newdata = DataStructures.xypoint(xgrid.size)
newdata.x = np.copy(xgrid)
newdata.y = Model(newdata.x)
left = np.searchsorted(data.x, (3*xgrid[0]-xgrid[1])/2.0)
search = np.searchsorted
mean = np.mean
for i in range(xgrid.size-1):
right = search(data.x, (xgrid[i]+xgrid[i+1])/2.0)
newdata.y[i] = mean(data.y[left:right])
left = right
right = search(data.x, (3*xgrid[-1]-xgrid[-2])/2.0)
newdata.y[xgrid.size-1] = np.mean(data.y[left:right])
return newdata
#This function reduces the resolution by convolving with a gaussian kernel
def ReduceResolution(data,resolution, cont_fcn=None, extend=True, nsigma=8):
centralwavelength = (data.x[0] + data.x[-1])/2.0
xspacing = data.x[1] - data.x[0] #NOTE: this assumes constant x spacing!
FWHM = centralwavelength/resolution;
sigma = FWHM/(2.0*np.sqrt(2.0*np.log(2.0)))
left = 0
right = np.searchsorted(data.x, 10*sigma)
x = np.arange(0,nsigma*sigma, xspacing)
gaussian = np.exp(-(x-float(nsigma)/2.0*sigma)**2/(2*sigma**2))
if extend:
#Extend array to try to remove edge effects (do so circularly)
before = data.y[-gaussian.size/2+1:]
after = data.y[:gaussian.size/2]
extended = np.append(np.append(before, data.y), after)
first = data.x[0] - float(int(gaussian.size/2.0+0.5))*xspacing
last = data.x[-1] + float(int(gaussian.size/2.0+0.5))*xspacing
x2 = np.linspace(first, last, extended.size)
conv_mode = "valid"
else:
extended = data.y.copy()
x2 = data.x.copy()
conv_mode = "same"
newdata = DataStructures.xypoint(data.x.size)
newdata.x = np.copy(data.x)
if cont_fcn != None:
cont1 = cont_fcn(newdata.x)
cont2 = cont_fcn(x2)
cont1[cont1 < 0.01] = 1
newdata.y = np.convolve(extended*cont2, gaussian/gaussian.sum(), mode=conv_mode)/cont1
else:
newdata.y = np.convolve(extended, gaussian/gaussian.sum(), mode=conv_mode)
return newdata
"""
# Ensure a directory exists. Create it if not
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
currentdir = os.getcwd() + "/"
homedir = os.environ["HOME"]
outfiledir = currentdir + "Cross_correlations/"
modeldir = homedir + "/School/Research/Models/Sorted/Stellar/"
gridspacing = "2e-4"
minvel = -1000 #Minimum velocity to output, in km/s
maxvel = 1000
star_list = ["M2", "M1", "M0", "K9", "K8", "K7", "K6", "K5", "K4", "K3", "K2", "K1", "K0", "G9", "G8", "G7", "G6", "G5",
"G4", "G3", "G2", "G1", "G0", "F9", "F8", "F7", "F6", "F5", "F4", "F3", "F2", "F1"]
temp_list = [3000, 3200, 3400, 3600, 3800, 4000, 4200, 4400, 4600, 4800, 5000, 5100, 5200, 5225, 5310, 5385, 5460, 5545,
5625, 5700, 5770, 5860, 5940, 6117, 6250, 6395, 6512, 6650, 6775, 6925, 7050, 7185]
model_list = [modeldir + "lte30-3.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte32-3.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte34-3.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte36-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte38-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte40-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte42-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte44-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte46-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte48-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte50-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte51-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte52-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte52-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte53-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte54-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte55-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte55-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte56-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte57-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte58-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte59-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte59-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte61-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte63-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte64-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte65-4.00-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte67-4.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte68-4.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte69-4.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte70-4.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted",
modeldir + "lte72-4.50-0.0.AGS.Cond.PHOENIX-ACES-2009.HighRes.7.sorted"]
#This will do the correlation within python/np
#The combine keyword decides whether to combine the chips into a master cross-correlation
#The normalize keyword decides whether to output as correlation power, or as significance
#The sigmaclip keyword decides whether to perform sigma-clipping on each chip before cross-correlating
#The nsigma keyword tells the program how many sigma to clip. This is ignored if sigmaclip = False
#The clip_order keyword tells what order polynomial to fit the flux to during sigma clipping. Ignored if sigmaclip = False
#The models keyword is a list of models to cross-correlate against: each entry is either the filename of a
# two-column ascii file, or a list with the first element the x points and the second element the y points
#The segments keyword controls which orders of the data to use, and which parts of them. Can be used to ignore telluric
# contamination. Can be a string (default) which will use all of the orders, a list of integers which will
# use all of the orders given in the list, or a dictionary of lists which gives the segments of each order to use.
#The save_output keyword tells whether to save the cross-correlation or just return the arrays
#The vsini keyword determines how much to rotationally broaden the model spectrum before
# cross-correlating
#The resolution keyword determines the detector resolution
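#Example (illustrative): segments="all" uses every order; segments=[1, 3, 5]
# uses only orders 1, 3 and 5 in full; segments={2: [[680.0, 690.5]]} uses only
# the 680.0-690.5 wavelength section of order 2.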
def PyCorr(filename, combine=True, normalize=False, sigmaclip=False, nsigma=3, clip_order=3, models=model_list,
segments="all", vsini=15 * Units.cm / Units.km, resolution=60000, save_output=True, outdir=outfiledir,
outfilename=None):
ensure_dir(outdir)
#1: Read in the datafile, if necessary
if type(filename) == str:
print "Reading filename %s" % filename
orders = FitsUtils.MakeXYpoints(filename, extensions=True, x="wavelength", y="flux", errors="error")
elif type(filename) == list:
orders = list(filename)
else:
sys.exit("Error! Not sure what to do with input to PyCorr!!")
makefname = False
    if outfilename is None:
makefname = True
#2: Interpolate data to a single constant wavelength grid in logspace
maxsize = 0
for order in orders:
if order.size() > maxsize:
maxsize = order.size()
data = DataStructures.xypoint(len(orders) * maxsize)
data.x = np.linspace(np.log10(orders[-1].x[0]), np.log10(orders[0].x[-1]), data.x.size)
data.y = np.ones(data.x.size)
data.err = np.ones(data.x.size)
data.cont = np.ones(data.cont.size)
firstindex = 1e9
for i in range(len(orders)):
order = orders[i]
order_sections = [[-1, 1e9], ]
#Use this order? Use all of it?
if type(segments) != str:
if type(segments) == list:
for element in segments:
if element == i + 1:
#Use all of this order
break
elif type(segments) == defaultdict or type(segments) == dict:
try:
order_sections = segments[i + 1]
except KeyError:
order_sections = [[-1, -1], ]
#Sigma-clipping?
if sigmaclip:
done = False
wave = order.x.copy()
flux = order.y.copy()
while not done:
done = True
fit = np.poly1d(np.polyfit(wave, flux, clip_order))
residuals = flux - fit(wave)
mean = np.mean(residuals)
std = np.std(residuals)
badindices = np.where(np.abs(residuals - mean) > nsigma * std)[0]
flux[badindices] = fit(wave[badindices])
if badindices.size > 0:
done = False
order.y = flux.copy()
#Interpolate to constant wavelength grid (in log-space)
FLUX = UnivariateSpline(np.log10(order.x), order.y, s=0)
ERR = UnivariateSpline(np.log10(order.x), order.err, s=0)
CONT = UnivariateSpline(np.log10(order.x), order.cont, s=0)
for section in order_sections:
left = np.searchsorted(order.x, section[0])
right = np.searchsorted(order.x, section[1])
if right == left:
continue
if right > 0:
right -= 1
left = np.searchsorted(data.x, np.log10(order.x[left]))
right = np.searchsorted(data.x, np.log10(order.x[right]))
if right > firstindex:
#Take the average of the two overlapping orders
data.y[firstindex:right] = (data.y[firstindex:right] / data.cont[firstindex:right] + FLUX(
data.x[firstindex:right]) / CONT(data.x[firstindex:right])) / 2.0
right = firstindex
data.y[left:right] = FLUX(data.x[left:right])
data.err[left:right] = ERR(data.x[left:right])
data.cont[left:right] = CONT(data.x[left:right])
firstindex = left
#3: Begin loop over model spectra
for i in range(len(models)):
modelfile = models[i]
temp = int(modelfile.split("lte")[-1][:2]) * 100
star = str(temp)
#a: Read in file
if isinstance(modelfile, str):
print "******************************\nReading file ", modelfile
x, y = np.loadtxt(modelfile, usecols=(0, 1), unpack=True)
x = x * Units.nm / Units.angstrom
y = 10 ** y
else:
x = modelfile[0]
y = modelfile[1]
left = np.searchsorted(x, 2 * 10 ** data.x[0] - 10 ** data.x[-1])
right = np.searchsorted(x, 2 * 10 ** data.x[-1] - 10 ** data.x[0])
#left = np.searchsorted(x, 10**data.x[0])
#right = np.searchsorted(x, 10**data.x[-1])
model = DataStructures.xypoint(right - left + 1)
x2 = x[left:right].copy()
y2 = y[left:right].copy()
MODEL = UnivariateSpline(x2, y2, s=0)
#b: Make wavelength spacing constant
model.x = np.linspace(x2[0], x2[-1], right - left + 1)
model.y = MODEL(model.x)
#c: Find continuum by fitting model to a quadratic.
model.cont = FindContinuum.Continuum(model.x, model.y, fitorder=4)
#d: Convolve to a resolution of 60000
model = FittingUtilities.ReduceResolution(model.copy(), resolution, extend=False)
#e: Rotationally broaden
#model = RotBroad.Broaden(model, vsini)
#f: Convert to log-space
MODEL = UnivariateSpline(model.x, model.y, s=0)
CONT = UnivariateSpline(model.x, model.cont, s=0)
model.x = np.linspace(np.log10(model.x[0]), np.log10(model.x[-1]), model.x.size)
model.y = MODEL(10 ** model.x)
model.cont = CONT(10 ** model.x)
#g: Rebin to the same spacing as the data (but not the same pixels)
xgrid = np.arange(model.x[0], model.x[-1], data.x[1] - data.x[0])
model = FittingUtilities.RebinData(model.copy(), xgrid)
#h: Cross-correlate
data_rms = np.sqrt(np.sum((data.y / data.cont - 1) ** 2))
model_rms = np.sqrt(np.sum((model.y / model.cont - 1) ** 2))
left = np.searchsorted(model.x, data.x[0])
right = model.x.size - np.searchsorted(model.x, data.x[-1])
delta = left - right
print "Cross-correlating..."
#np.savetxt("corr_inputdata.dat", np.transpose((10**data.x, data.y/data.cont)))
#np.savetxt("corr_inputmodel.dat", np.transpose((10**model.x, model.y/model.cont)))
#ycorr = np.correlate(data.y/data.cont-1.0, model.y/model.cont-1.0, mode="full")
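        #fftconvolve with one input reversed (and the result reversed back) is
        #an FFT-accelerated equivalent of a full cross-correlation.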
ycorr = fftconvolve((data.y / data.cont - 1.0)[::-1], model.y / model.cont - 1.0, mode="full")[::-1]
xcorr = np.arange(ycorr.size)
lags = xcorr - (model.x.size + data.x.size - delta - 1.0) / 2.0
distancePerLag = model.x[1] - model.x[0]
offsets = -lags * distancePerLag
velocity = offsets * 3e5 * np.log(10.0)
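        #Since x is log10(wavelength), each lag is a shift in log10(lambda);
        #for small shifts v = c * ln(10) * dlog10(lambda), with c ~ 3e5 km/s.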
corr = DataStructures.xypoint(velocity.size)
corr.x = velocity[::-1]
corr.y = ycorr[::-1] / (data_rms * model_rms)
#My version at home has a bug in np.correlate, reversing ycorr
#BUG FIXED IN THE PYTHON VERSION I HAVE FOR LINUX MINT 13
#if "linux" in sys.platform:
# corr.y = corr.y[::-1]
#i: Fit low-order polynomal to cross-correlation
left = np.searchsorted(corr.x, minvel)
right = np.searchsorted(corr.x, maxvel)
vel = corr.x[left:right]
corr = corr.y[left:right]
fit = np.poly1d(np.polyfit(vel, corr, 2))
#j: Adjust correlation by fit
corr = corr - fit(vel)
if normalize:
mean = np.mean(corr)
std = np.std(corr)
corr = (corr - mean) / std
#k: Finally, output or return
if save_output:
if makefname:
outfilename = outdir + filename.split("/")[-1] + "." + star
print "Outputting to ", outfilename, "\n"
np.savetxt(outfilename, np.transpose((vel, corr)), fmt="%.8g")
else:
return vel, corr
if __name__ == "__main__":
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
PyCorr(fname) #, combine=False, sigmaclip=False)
|
kgullikson88/TS23-Scripts
|
Correlate.temp.py
|
Python
|
gpl-3.0
| 15,540
|
[
"Gaussian"
] |
b865004e06df7a1edb4197e5d590f61ece30902c71dbc0d677b36a4a9475c311
|
# Some Attributes derived from the Eigenvalues of Orientation Tensor
#
# Calculates a series of attributes based on the eigenvalues of an orientation tensor
#
# Cline = (e2-e3)/(e2+e3) (Bakker, 2001)
# Cplane = (e1-e2)/(e1+e2) (Bakker, 2001)
# Cfault = Cline * (1 - Cplane) (Bakker, 2001)
# Cchaos = 4*e1*e3*(e1+e2+e3)/(e1+e3)^2 (Wang etal, 2009)
# Ctype = ((e1-e2)^2 + (e1-e3)^2 + (e2-e3)^2) / (e1^2 + e2^2 + e3^2) (Haubecker and Jahne, 1996)
#
# The orientation tensor is derived from the coefficients of a local polynomial
# approximation (3D 2nd order polynomial using gaussian weighted least squares) as proposed by Farneback.
#
# The eigenvalues are numbered in decreasing order of their magnitude.
#
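# Worked example (illustrative only): for eigenvalues e1=4, e2=2, e3=1
#   Cline  = (2-1)/(2+1)                             = 0.333
#   Cplane = (4-2)/(4+2)                             = 0.333
#   Cfault = 0.333*(1 - 0.333)                       = 0.222
#   Cchaos = 4*4*1*(4+2+1)/((4+1)^2)                 = 4.48
#   Ctype  = ((4-2)^2+(4-1)^2+(2-1)^2)/(4^2+2^2+1^2) = 0.667
#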
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..', '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
xa.params = {
'Input': 'Input',
'Output': ['Cline', 'Cplane', 'Cfault', 'Cchaos', 'Ctype'],
'ZSampMargin' : {'Value':[-1,1], 'Symmetric': True},
'StepOut' : {'Value': [1,1]},
'Par_0': {'Name': 'Weight Factor', 'Value': 0.2},
'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/External_Attributes/LPA_Attributes/'
}
#
# Define the compute function
#
def doCompute():
xs = xa.SI['nrinl']
ys = xa.SI['nrcrl']
zs = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
wf = xa.params['Par_0']['Value']
kernel = lpa3D_init(xs, ys, zs, wf)
gam = 1/(8*((min(xs,ys,zs)-1)*wf)**2)
while True:
xa.doInput()
r = np.zeros((10,xa.TI['nrsamp']))
for i in range(0,10):
r[i,:] = xl.sconvolve(xa.Input,kernel[i])
A = np.rollaxis(np.array([[r[4],r[7]/2, r[8]/2], [r[7]/2, r[5], r[9]/2], [r[8]/2, r[9]/2, r[6]]]),2)
AAT = np.einsum('...ij,...jk->...ik', A, np.swapaxes(A,1,2))
B = np.rollaxis(np.array([[r[1]],[r[2]],[r[3]]]),2)
BBT = np.einsum('...ij,...jk->...ik', B, np.swapaxes(B,1,2))
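		# Farneback's orientation tensor: T = A*A^T + gam*b*b^T, where A holds the
		# quadratic coefficients (r[4]..r[9]) and b the linear ones (r[1]..r[3]).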
T = AAT+gam*BBT
w = np.linalg.eigvalsh(T)
v=np.rollaxis(np.sort(w),1)
e1 = v[2,:]
e2 = v[1,:]
e3 = v[0,:]
e1me2 = e1-e2
e1me3 = e1-e3
e2me3 = e2-e3
e1pe3 = e1+e3
xa.Output['Cline'] = e2me3/(e2+e3)
xa.Output['Cplane'] = e1me2/(e1+e2)
xa.Output['Cfault'] = xa.Output['Cline']*(1.0 - xa.Output['Cplane'])
xa.Output['Cchaos'] = 4.0 * e1 * e3 * (e1 + e2 + e3)/(e1pe3*e1pe3)
xa.Output['Ctype'] = (e1me2*e1me2 + e1me3*e1me3 + e2me3*e2me3)/(e1*e1 + e2*e2 + e3*e3)
xa.doOutput()
#
# Find the LPA solution for a 2nd order polynomial in 3D
#
def lpa3D_init( xs, ys, zs, sigma=0.2 ):
sx = sigma * (xs-1)
sy = sigma * (ys-1)
sz = sigma * (zs-1)
hxs = (xs-1)/2
hys = (ys-1)/2
hzs = (zs-1)/2
xtmp = np.linspace(-hxs,hxs,xs)
ytmp = np.linspace(-hys,hys,ys)
ztmp = np.linspace(-hzs,hzs,zs)
xyz = np.meshgrid(xtmp,ytmp,ztmp, indexing='ij')
x = xyz[0].flatten()
y = xyz[1].flatten()
z = xyz[2].flatten()
w = np.exp(-(x**2/(2*sx**2) + y**2/(2*sy**2) + z**2/(2*sz**2)))
W = np.diagflat(w)
A = np.dstack((np.ones(x.size), x, y, z, x*x, y*y, z*z, x*y, x*z, y*z)).reshape((x.size,10))
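	# DB = (A^T W A)^-1 A^T W is the weighted least-squares pseudo-inverse: applied
	# to a flattened neighbourhood it yields the 10 polynomial coefficients, so each
	# row reshapes into one convolution kernel per coefficient.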
DB = np.linalg.inv(A.T.dot(W).dot(A)).dot(A.T).dot(W)
return DB.reshape((10,xs,ys,zs))
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
|
waynegm/OpendTect-Plugins
|
bin/python/wmpy/Experimental/LocalPolynomialApproximation/ex_lpa_cvals.py
|
Python
|
gpl-3.0
| 3,302
|
[
"Gaussian"
] |
5398766ca658009768ed5d3c1359d74aa27cf8c736be4eda685c3598300448a5
|
import numpy as np
from PIL import Image
from appJar import gui
from simple_operations.check_if_image_grey import check_if_image_grey
image_set = False
i = 0
# create a GUI variable
img_process_gui = gui("Image Processing")
img_process_gui.setGeometry("fullscreen")
img_process_gui.setBg("#c2c6c3")
img_process_gui.setFont(17)
from ImageEdit import ImageEdit
import matplotlib.pyplot as plt
import os
def update_image():
"""
The function updates the image that is shown in the gui
:return: nothing
"""
global ed_img
global image_set
if image_set:
img_process_gui.reloadImage("show_image", showed_image)
ed_img = ImageEdit(showed_image)
else:
img_process_gui.addImage("show_image", showed_image)
ed_img = ImageEdit(showed_image)
image_set = True
def create_statistics(title):
"""
Creates a new window with the wished statistics
:param title: The shown statistics title
:return: nothing
"""
img_process_gui.startSubWindow(title, modal=True, blocking=True)
img_process_gui.addImage(title, "temporary1.png")
img_process_gui.showSubWindow(title)
os.remove("temporary1.png")
img_process_gui.destroySubWindow(title)
def clean():
"""
Deletes the temporary data before closing the program
:return: true
"""
if os.path.isfile("temporary.png"):
os.remove("temporary.png")
return True
# handle events
def press(button):
"""
Handles the press on the buttons Save and Load
:param button: name of the pressed button
:return: nothing
"""
global showed_image
global image_set
if button == "Save":
if image_set:
path = img_process_gui.saveBox(title='Image', fileName='img', dirName=None, fileExt=".png")
save_image = Image.open('temporary.png')
save_image.save(path)
else:
img_process_gui.infoBox("Can't Save Nothing", "You first have to load a picture before you can save one.")
else:
showed_image = img_process_gui.openBox()
save_image = Image.open(showed_image)
save_image.save('temporary.png')
update_image()
def choose_statistics(option):
"""
    Handles statistics for the current image via the menu options in the statistics menu bar
:param option: the chosen option
:return: nothing
"""
global i
global showed_image
if option == "color proportions":
piechart = ed_img.color_proportion()
explode = (0, 0, 0)
labels = ['Red', 'Green', 'Blue']
colors = ['red', 'green', 'blue']
plt.pie(piechart, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.ylabel("Farbanteil")
plt.savefig("temporary1.png")
create_statistics("Color proportions" + str(i))
i+=1
elif option == "histogram":
histogr = ed_img.histogram()
plt.bar(np.arange(255), histogr)
plt.ylabel("Grauwertanzahl")
plt.savefig("temporary1.png")
create_statistics("Histogram"+str(i))
i+=1
return
elif option == "cumulative histogram":
histogr = ed_img.histogram()
cumul_histogr = ed_img.cumulative_histogram(histogr)
plt.bar(np.arange(255), cumul_histogr)
plt.ylabel("Grauwertanzahl")
plt.savefig("temporary1.png")
create_statistics("Cumulative histogram"+str(i))
i+=1
return
elif option == "histogram equalization":
histogr = ed_img.histogram_equalization()
plt.bar(np.arange(255), histogr)
plt.xlabel("Equalized histogram")
plt.savefig("temporary1.png")
create_statistics("Equalized histogram"+str(i))
i+=1
return
def choose_simple(option):
"""
Handles simple transformation algorithms on the menu options in the simple menu bar
:param option: the chosen option
:return: nothing
"""
global showed_image
if option == "change contrast/brightness":
grey = check_if_image_grey(showed_image) # This value has to be set with the right function
if not grey:
img_process_gui.infoBox("Can't Edit Image", "You first have to convert it into grey image")
return
contrast = img_process_gui.numberBox("Contrast", "Set the contrast modifier")
if not contrast:
contrast = 1
brightness = img_process_gui.numberBox("Brightness", "Set the brightness modifier")
if not brightness:
brightness = 0
temp = Image.fromarray(np.uint8(ed_img.change_contrast_brightness(contrast, brightness)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "convert to binary":
threshold = img_process_gui.numberBox("Threshold", "Set the threshold")
temp = Image.fromarray(np.uint8(ed_img.convert_to_binary(threshold)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "convert to grey":
temp = Image.fromarray(np.uint8(ed_img.convert_to_grey()))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "mirror image":
grey = check_if_image_grey(showed_image) # This value has to be set with the right function
if not grey:
img_process_gui.infoBox("Can't Edit Image", "You first have to convert it into grey image")
mirror_param = img_process_gui.textBox("Mirror Parameter", "Set the mirror parameter")
if not mirror_param:
return
while not (mirror_param == "v" or mirror_param == "h" or mirror_param == "b"):
mirror_param = img_process_gui.textBox("Mirror Parameter", "The parameter must be v, h or b")
if not mirror_param:
return
temp = Image.fromarray(np.uint8(ed_img.mirror_image(mirror_param)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
return
def choose_filter(option):
global showed_image
"""
Handles filter algorithms on the menu options in the filter menu bar
:param option: the chosen option
:return: nothing
"""
grey = check_if_image_grey(showed_image) # This value has to be set with the right function
if not grey:
img_process_gui.infoBox("Can't Edit Image", "You first have to convert it into grey image")
elif option == "kuwahara":
mask_size = img_process_gui.numberBox("Mask Size", "Set the mask size")
while mask_size < 3 or mask_size % 2 == 0:
mask_size = img_process_gui.numberBox("Mask Size", "Set the mask size. It has to be odd and at least 3")
temp = Image.fromarray(np.uint8(ed_img.kuwahara_filter(mask_size)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "linear filter":
linear_param = img_process_gui.textBox("Linear Filter Parameter", "Set the linear filter parameter "
"(b - boxfilter, g - gaussian filter, g2 - "
"gaussian filter in the second grade)")
while not (linear_param == "b" or linear_param == "g" or linear_param == "g2"):
linear_param = img_process_gui.textBox("Linear Filter Parameter", "The parameter must be b, g or g2")
temp = Image.fromarray(np.uint8(ed_img.linear_filter(linear_param)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "min filter":
region_size = img_process_gui.numberBox("Region Size", "Set the region size")
while region_size < 3 or region_size % 2 == 0:
region_size = img_process_gui.numberBox("Region Size", "Set the region size. It has to be odd & at least 3")
temp = Image.fromarray(np.uint8(ed_img.min_filter(region_size)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "median filter":
region_size = img_process_gui.numberBox("Region Size", "Set the region size")
while region_size < 3 or region_size % 2 == 0:
region_size = img_process_gui.numberBox("Region Size", "Set the region size. It has to be odd & at least 3")
temp = Image.fromarray(np.uint8(ed_img.median_filter(region_size)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "max filter":
region_size = img_process_gui.numberBox("Region Size", "Set the region size")
while region_size < 3 or region_size % 2 == 0:
region_size = img_process_gui.numberBox("Region Size", "Set the region size. It has to be odd & at least 3")
temp = Image.fromarray(np.uint8(ed_img.max_filter(region_size)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
def choose_template(option):
"""
Handles template matching algorithms on the menu options in the template menu bar
- champfer matching
- correlation coefficient
- distance transformation
:param option: the chosen option
:return: nothing
"""
global showed_image
if option == "champfer matching":
template_path = img_process_gui.openBox("Template path")
temp = Image.fromarray(np.uint8(ed_img.champfer_matching(template_path)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "correlation coefficient":
grey = check_if_image_grey(showed_image) # This value has to be set with the right function
if not grey:
img_process_gui.infoBox("Can't Edit Image", "You first have to convert it into grey image")
template_path = img_process_gui.openBox("Template path")
temp = Image.fromarray(np.uint8(ed_img.correlation_coefficient(template_path)))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
elif option == "distance transformation":
temp = Image.fromarray(np.uint8(ed_img.distance_transformation()))
showed_image = 'temporary.png'
temp.save(showed_image)
update_image()
# add Menu list
image_statistics_name_list = ["color proportions", "histogram", "cumulative histogram", "histogram equalization"]
simple_operation_name_list = ["change contrast/brightness", "convert to binary", "convert to grey",
"mirror image"]
filter_operation_name_list = ["kuwahara", "linear filter", "min filter", "median filter", "max filter"]
template_matching_name_list = ["champfer matching", "correlation coefficient", "distance transformation"]
img_process_gui.addMenuList("Image Statistics", image_statistics_name_list, choose_statistics)
img_process_gui.addMenuList("Simple Operations", simple_operation_name_list, choose_simple)
img_process_gui.addMenuList("Filter Operations", filter_operation_name_list, choose_filter)
img_process_gui.addMenuList("Template Matching", template_matching_name_list, choose_template)
# add Buttons
img_process_gui.addButtons(["Save", "Load"], press)
img_process_gui.setStopFunction(clean)
# start the GUI
img_process_gui.go()
|
vvvityaaa/PyImgProcess
|
GUI.py
|
Python
|
mit
| 11,420
|
[
"Gaussian"
] |
20ed6e089a645656add4e09a36637f10c38fd7da0f814948ab91967ee4ba24ff
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between ground-truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
Python
|
bsd-3-clause
| 4,334
|
[
"Gaussian"
] |
266a9a404e8f369d037d63056bb1a2e4a4fdd47e48c43388efdf58adb693a90d
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 29 2016
@author: violeta castelo-szekely
"""
import argparse, pysam, sys, subprocess, re
import numpy as np
transcripts= {}
tr_list = set()
sequences= {}
def get_length(sequence):
return len(sequence)
def get_gc_content(sequence):
if len(sequence) < 1:
return ('NA')
num_g = sequence.count('G')
num_c = sequence.count('C')
num_a = sequence.count('A')
num_t = sequence.count('T')
return (num_g + num_c) / float((num_g + num_c + num_a + num_t))
def get_kozak_score(sequence):
    # For incomplete Kozak sequences (from transcripts with a zero-length or <6 nt 5'UTR),
    # the score is set to 'NA' to indicate oddball status
    if len(sequence) < 10:
        kozak_score = 'NA'
    # The Kozak consensus is GccA/GccATGG, where the A of the ATG is position +1. Score:
    # 'G' at -6 (3 points); 'A'/'G' at -3 (3 points); 'G' at +4 (3 points);
    # 'C' at positions -1, -2, -4, -5 (1 point each)
else:
kozak_score = 0
kozak_score += 3 * sum((sequence[0] == "G", sequence[3] == "A" or sequence[3] == "G", sequence[-1] == "G"))
kozak_score += sum ((sequence[1] == "C", sequence[2] == "C", sequence[4] == "C", sequence[5] == "C"))
return kozak_score
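# Example: a perfect context such as "GCCACCATGG" scores 3+3+3 (G at -6, A at -3,
# G at +4) plus 4 (C at -5, -4, -2, -1) = 13, the maximum.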
# MFE (minimum folding energy. Using RNAfold from viennaRNA.)
def execute_external(program):
process = subprocess.Popen(program,
stdout = subprocess.PIPE,
shell = True)
return iter(process.stdout.readline, b'')
def get_energy(sequence):
if len(sequence) < 1:
return ('NA')
lines = execute_external("echo {} | RNAfold --noPS -p".format(sequence))
    next(lines)  # skip the sequence line
    # the pipe yields bytes under Python 3, hence the decode()
    MFE = float(re.sub('[()]', '', next(lines).decode().split()[-1]))
return (MFE)
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gtf', help="specify the GTF file")
parser.add_argument('-r', '--region', help="specify the region. Choices: 'utr5', 'cds', 'utr3', 'transcript', 'kozak'.", type=str)
parser.add_argument('-f', '--fasta', help="specify the cDNA fasta.", type=str)
parser.add_argument('-w', '--what', help="feature of interest. Implemented options: 'length', 'gc' (GC content), 'kozak' (Kozak context), 'mfe' (minimum folding energy).", type=str)
parser.add_argument('-l', '--list', help="file containing the list of GeneID and TranscriptIDs of interest", type=str)
args = parser.parse_args()
try:
with open(args.gtf) as gtf_file:
for line in gtf_file:
parsed = line.strip().split('\t')
if parsed[4] == 'composite' :
continue
gid = parsed[0]
trid = parsed[2]
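            # Column layout of this (non-standard) GTF-derived file, inferred from the
            # indexing below: 0 = gene id, 2 = transcript id, 5 = transcript length,
            # 6 = CDS start, 7 = CDS end (0-based coordinates used as slice bounds)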
if not "|".join([gid,trid]) in transcripts:
transcripts["|".join([gid,trid])] = {'transcript': (0, int(parsed[5])), 'utr5': (0, int(parsed[6])-1),'cds': (int(parsed[6]), int(parsed[7])), 'utr3': (1+int(parsed[7]), int(parsed[5])), 'kozak': (int(parsed[6])-6, int(parsed[6])+4)}
#this is to handle transcripts with no 5'utr (cds starting at 0):
if transcripts["|".join([gid,trid])]['utr5'][1] < 0:
transcripts["|".join([gid,trid])]['utr5'] = (0,0)
transcripts["|".join([gid,trid])]['kozak'] = (0,4)
except IOError:
sys.stderr.write("Could not read the GTF file '{}'".format(args.gtf))
sys.exit(1)
if args.fasta == None:
sys.stderr.write('Need a cDNA FASTA file, use -f/--fasta option.\n')
sys.exit(1)
else:
FA_FILE = args.fasta
if args.region == None:
sys.stderr.write('Need to specify a region, use -r/--region option. For Kozak score, choose "-r kozak".\n')
sys.exit(1)
else:
region = args.region
if args.list == None:
sys.stderr.write("Could not read the GeneID, TranscriptID file {}".format(args.list))
sys.exit(1)
with open(args.list) as trs_file:
for line in trs_file:
parsed = line.strip().split('\t')
gid = parsed[0]
trid = parsed[1]
seq_obj = pysam.faidx(FA_FILE, "|".join([gid, trid]))
if not "|".join([gid,trid]) in sequences:
sequences["|".join([gid,trid])] = "".join([l.strip() for l in seq_obj[1:]])[transcripts["|".join([gid,trid])][region][0]:transcripts["|".join([gid,trid])][region][1]]
for gid in sequences:
if args.what == 'kozak':
sys.stdout.write('{}\t{}\t{}\n'.format(gid.split('|')[0], gid.split('|')[1], get_kozak_score(sequences[gid])))
    if args.what == 'mfe':
        mfe = get_energy(sequences[gid])
        # get_energy() returns a single value, so only one field is written
        sys.stdout.write('{}\t{}\t{}\n'.format(gid.split('|')[0], gid.split('|')[1], mfe))
if args.what == 'length':
sys.stdout.write('{}\t{}\t{}\n'.format(gid.split('|')[0], gid.split('|')[1], get_length(sequences[gid])))
if args.what == 'gc':
sys.stdout.write('{}\t{}\t{}\n'.format(gid.split('|')[0], gid.split('|')[1], get_gc_content(sequences[gid])))
if args.what == None:
sys.stderr.write('Need to specify a feature property to analyze, use -w/--what option.')
|
gatfieldlab/cross-organ_riboprof
|
data_analysis/transcript_features_v2.py
|
Python
|
gpl-3.0
| 5,088
|
[
"pysam"
] |
abb00d47dce6d113ebaeb17dd25f8e948cd3f68d6330c7a9e3ab097dbd37dfa3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gaussian noise layer."""
import keras
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
import numpy as np
import tensorflow.compat.v2 as tf
@test_combinations.run_all_keras_modes
class NoiseLayersTest(test_combinations.TestCase):
def test_GaussianNoise(self):
test_utils.layer_test(
keras.layers.GaussianNoise,
kwargs={'stddev': 1.},
input_shape=(3, 2, 3))
def _make_model(self, dtype):
assert dtype in (tf.float32, tf.float64)
model = keras.Sequential()
model.add(keras.layers.Dense(8, input_shape=(32,), dtype=dtype))
layer = keras.layers.GaussianNoise(0.0003, dtype=dtype)
model.add(layer)
return model
def _train_model(self, dtype):
model = self._make_model(dtype)
model.compile(
optimizer='sgd',
loss='mse',
run_eagerly=test_utils.should_run_eagerly())
model.train_on_batch(np.zeros((8, 32)), np.zeros((8, 8)))
def test_gaussian_noise_float32(self):
self._train_model(tf.float32)
def test_gaussian_noise_float64(self):
self._train_model(tf.float64)
if __name__ == '__main__':
tf.test.main()
|
keras-team/keras
|
keras/layers/regularization/gaussian_noise_test.py
|
Python
|
apache-2.0
| 1,859
|
[
"Gaussian"
] |
af24fba55e76bf98f62f1875655f90a063a006d6fb0f2e12975c1ae9b3ee6805
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
def makeModel():
# create container for model
model = moose.Neutral( 'model' )
compartment = moose.CubeMesh( '/model/compartment' )
compartment.volume = 1e-15
# the mesh is created automatically by the compartment
mesh = moose.element( '/model/compartment/mesh' )
# create molecules and reactions
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
enz1 = moose.Enz( '/model/compartment/b/enz1' )
enz2 = moose.Enz( '/model/compartment/c/enz2' )
cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' )
cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' )
reac = moose.Reac( '/model/compartment/reac' )
# connect them up for reactions
moose.connect( enz1, 'sub', a, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' )
moose.connect( enz1, 'enz', b, 'reac' )
moose.connect( enz1, 'cplx', cplx1, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' )
moose.connect( enz2, 'prd', a, 'reac' )
moose.connect( enz2, 'enz', c, 'reac' )
moose.connect( enz2, 'cplx', cplx2, 'reac' )
moose.connect( reac, 'sub', a, 'reac' )
moose.connect( reac, 'prd', b, 'reac' )
# connect them up to the compartment for volumes
#for x in ( a, b, c, cplx1, cplx2 ):
# moose.connect( x, 'mesh', mesh, 'mesh' )
# Assign parameters
a.concInit = 1
b.concInit = 0
c.concInit = 0.01
enz1.kcat = 0.4
enz1.Km = 4
enz2.kcat = 0.6
enz2.Km = 0.01
reac.Kf = 0.001
reac.Kb = 0.01
# Create the output tables
graphs = moose.Neutral( '/model/graphs' )
outputA = moose.Table2 ( '/model/graphs/concA' )
outputB = moose.Table2 ( '/model/graphs/concB' )
# connect up the tables
moose.connect( outputA, 'requestOut', a, 'getConc' );
moose.connect( outputB, 'requestOut', b, 'getConc' );
def displayPlots():
for x in moose.wildcardFind( '/model/graphs/conc#' ):
t = numpy.arange( 0, x.vector.size, 1 ) #sec
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
def main():
"""
This example illustrates how to set up a kinetic solver and kinetic model
using the scripting interface. Normally this would be done using the
    Shell::doLoadModel command, coordinated by the
SimManager as the base of the entire model.
This example creates a bistable model having two enzymes and a reaction.
One of the enzymes is autocatalytic.
The model is set up to run using Exponential Euler integration.
"""
makeModel()
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
try:
ksolve.numThreads = 10
except Exception as e:
print( 'No parallel ksolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = moose.element( '/model/compartment' )
stoich.ksolve = ksolve
stoich.path = "/model/compartment/##"
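    # Assigning the wildcard path hands every pool, reaction and enzyme under the
    # compartment over to the solver; from here on Ksolve, not the individual
    # objects, performs the numerical integration.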
#solver.method = "rk5"
#mesh = moose.element( "/model/compartment/mesh" )
#moose.connect( mesh, "remesh", solver, "remesh" )
'''
moose.setClock( 5, 1.0 ) # clock for the solver
moose.useClock( 5, '/model/compartment/ksolve', 'process' )
'''
moose.reinit()
moose.start( 100.0 ) # Run the model for 100 seconds.
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
# move most molecules over to b
b.conc = b.conc + a.conc * 0.9
a.conc = a.conc * 0.1
moose.start( 100.0 ) # Run the model for 100 seconds.
# move most molecules back to a
a.conc = a.conc + b.conc * 0.99
b.conc = b.conc * 0.01
moose.start( 100.0 ) # Run the model for 100 seconds.
# Iterate through all plots, dump their contents to data.plot.
displayPlots()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
BhallaLab/moose
|
moose-examples/snippets/scriptKineticSolver.py
|
Python
|
gpl-3.0
| 4,443
|
[
"MOOSE"
] |
fd54cce007ba9c1013677ca4e68db8f8768f47a3a69e5e3934fbd3885dc06487
|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
import espressopp
from espressopp import unittest
import mpi4py.MPI as MPI
import math
from espressopp import Real3D
class TestFixedLocalTupleList(unittest.TestCase) :
def setUp(self) :
system = espressopp.System()
rng = espressopp.esutil.RNG()
N = 4
SIZE = float(N)
box = Real3D(SIZE)
bc = espressopp.bc.OrthorhombicBC(None, box)
system.bc = bc
# a small skin avoids rounding problems
system.skin = 0.001
cutoff = SIZE/2. - system.skin
comm = espressopp.MPI.COMM_WORLD
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,cutoff,system.skin)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, cutoff, system.skin)
print 'NodeGrid = %s'%(nodeGrid,)
print 'CellGrid = %s'%cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
pid = 0
for i in xrange(N):
for j in xrange(N):
for k in xrange(N):
r = 0.5
x = (i + r) / N * SIZE
y = (j + r) / N * SIZE
z = (k + r) / N * SIZE
system.storage.addParticle(pid, Real3D(x, y, z))
pid = pid + 1
for i in xrange(N):
for j in xrange(N):
for k in xrange(N):
r = 0.25
x = (i + r) / N * SIZE
y = (j + r) / N * SIZE
z = (k + r) / N * SIZE
system.storage.addParticle(pid, Real3D(x, y, z))
pid = pid + 1
system.storage.decompose()
# now build Fixed Local Tuple List
tuplelist = espressopp.FixedLocalTupleList(system.storage)
self.system = system
self.N = N
self.tuplelist = tuplelist
# This function checks the size of empty FixedLocalTupleList.
def test_create_fixedtuplelist(self) :
self.assertEqual(sum(self.tuplelist.size(), 0), 0)
# This function checks the python interface of FixedLocalTupleList.
    # For addTuple() and size(), this function tests
    # whether the number of added tuples equals the return value of size().
    # For getTuples(), this function tests
    # whether the tuplelist obtained by getTuples() equals the added tuplelist.
def test_add_get_fixedtuplelist(self) :
system = self.system
N = self.N
tuplelist = self.tuplelist
# FixedLocalTupleList contain particles
num_constrain = N*N
stored = []
for i in range(N*N*N/num_constrain):
tuple = []
for j in range(num_constrain):
tuple.append(num_constrain*i + j)
tuplelist.addTuple(tuple)
stored.append(tuple)
num_constrain = N*N/2
for i in range(N*N*N/num_constrain, 2*N*N*N/num_constrain):
tuple = []
for j in range(num_constrain):
tuple.append(num_constrain*i + j)
tuplelist.addTuple(tuple)
stored.append(tuple)
# check the size of FixedLocalTupleList
self.assertEqual(sum(tuplelist.size(), 0), 1.5*N*N*N/num_constrain)
# check the contained particles id
g_tuplelist = tuplelist.getTuples()
s_id = 0
for i in range(3*N*N*N/num_constrain/2):
for j in range(espressopp.MPI.COMM_WORLD.size):
if stored[s_id] in g_tuplelist[j]:
break
self.assertEqual(stored[s_id] in g_tuplelist[j], True)
s_id += 1
if __name__ == "__main__":
unittest.main()
|
kkreis/espressopp
|
testsuite/FixedLocalTuple/TestFixedLocalTupleList.py
|
Python
|
gpl-3.0
| 4,612
|
[
"ESPResSo"
] |
f237168a098942402c43480622055db5917d4aa8fee3e5c499728811335ba531
|
#!/usr/bin/env python
import os
import sys
from glob import glob
sys.path.insert(0, os.path.abspath('lib'))
from ansible import __version__, __author__
try:
from setuptools import setup
except ImportError:
print "Ansible now needs setuptools in order to build. " + \
"Install it using your package manager (usually python-setuptools) or via pip (pip install setuptools)."
sys.exit(1)
# find library modules
from ansible.constants import DEFAULT_MODULE_PATH
module_paths = DEFAULT_MODULE_PATH.split(os.pathsep)
# always install in /usr/share/ansible if specified
# otherwise use the first module path listed
if '/usr/share/ansible' in module_paths:
install_path = '/usr/share/ansible'
else:
install_path = module_paths[0]
dirs=os.listdir("./library/")
data_files = []
for i in dirs:
data_files.append((os.path.join(install_path, i), glob('./library/' + i + '/*')))
setup(name='ansible',
version=__version__,
description='Radically simple IT automation',
author=__author__,
author_email='michael@ansible.com',
url='http://ansible.com/',
license='GPLv3',
install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'],
package_dir={ 'ansible': 'lib/ansible' },
packages=[
'ansible',
'ansible.utils',
'ansible.utils.module_docs_fragments',
'ansible.inventory',
'ansible.inventory.vars_plugins',
'ansible.playbook',
'ansible.runner',
'ansible.runner.action_plugins',
'ansible.runner.lookup_plugins',
'ansible.runner.connection_plugins',
'ansible.runner.filter_plugins',
'ansible.callback_plugins',
'ansible.module_utils'
],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
'bin/ansible-pull',
'bin/ansible-doc',
'bin/ansible-galaxy',
'bin/ansible-vault',
],
data_files=data_files
)
|
pilwon/ansible
|
setup.py
|
Python
|
gpl-3.0
| 1,984
|
[
"Galaxy"
] |
b9760e2bcc167254a28609ee33e711efb91d14a2a43aaeb04dea8a2ac5c4de86
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Loan wizard"""
from decimal import Decimal
import datetime
import gtk
from kiwi.currency import currency
from kiwi.datatypes import ValidationError
from kiwi.python import Settable
from kiwi.ui.widgets.entry import ProxyEntry
from kiwi.ui.objectlist import Column
from storm.expr import And, Or, Eq
from stoqlib.api import api
from stoqlib.domain.person import (Client, LoginUser,
ClientCategory)
from stoqlib.domain.loan import Loan, LoanItem
from stoqlib.domain.payment.group import PaymentGroup
from stoqlib.domain.sale import Sale
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.views import LoanView, ProductWithStockBranchView
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.defaults import MAX_INT
from stoqlib.lib.formatters import format_quantity
from stoqlib.lib.message import info, yesno
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.base.wizards import (WizardEditorStep, BaseWizard,
BaseWizardStep)
from stoqlib.gui.dialogs.batchselectiondialog import BatchDecreaseSelectionDialog
from stoqlib.gui.dialogs.missingitemsdialog import (get_missing_items,
MissingItemsDialog)
from stoqlib.gui.events import (NewLoanWizardFinishEvent,
CloseLoanWizardFinishEvent,
LoanItemSelectionStepEvent)
from stoqlib.gui.editors.loanitemeditor import LoanItemEditor
from stoqlib.gui.editors.noteeditor import NoteEditor
from stoqlib.gui.search.searchcolumns import IdentifierColumn, SearchColumn
from stoqlib.gui.search.searchslave import SearchSlave
from stoqlib.gui.utils.printing import print_report
from stoqlib.gui.widgets.searchentry import ClientSearchEntryGadget
from stoqlib.gui.wizards.abstractwizard import SellableItemStep
from stoqlib.gui.wizards.salequotewizard import SaleQuoteItemStep
from stoqlib.reporting.loanreceipt import LoanReceipt
_ = stoqlib_gettext
#
# Wizard Steps
#
class StartNewLoanStep(WizardEditorStep):
gladefile = 'SalesPersonStep'
model_type = Loan
proxy_widgets = ['client', 'salesperson', 'expire_date',
'client_category']
def _setup_widgets(self):
# Hide total and subtotal
self.summary_table.hide()
self.total_box.hide()
# Hide invoice number details
self.invoice_number_label.hide()
self.invoice_number.hide()
# Hide cost center combobox
self.cost_center_lbl.hide()
self.cost_center.hide()
# Responsible combo
self.salesperson_lbl.set_text(_(u'Responsible:'))
self.salesperson.model_attribute = 'responsible'
users = self.store.find(LoginUser, is_active=True)
self.salesperson.prefill(api.for_person_combo(users))
self.salesperson.set_sensitive(False)
self._setup_clients_widget()
self._fill_clients_category_combo()
self.expire_date.mandatory = True
# CFOP combo
self.cfop_lbl.hide()
self.cfop.hide()
self.create_cfop.hide()
# Transporter/RemovedBy Combo
self.transporter_lbl.set_text(_(u'Removed By:'))
self.create_transporter.hide()
# removed_by widget
self.removed_by = ProxyEntry(unicode)
self.removed_by.model_attribute = 'removed_by'
if 'removed_by' not in self.proxy_widgets:
self.proxy_widgets.append('removed_by')
self.removed_by.show()
self._replace_widget(self.transporter, self.removed_by)
def _setup_clients_widget(self):
self.client.mandatory = True
self.client_gadget = ClientSearchEntryGadget(
entry=self.client,
store=self.store,
model=self.model,
parent=self.wizard)
def _fill_clients_category_combo(self):
categories = self.store.find(ClientCategory)
self.client_category.prefill(api.for_combo(categories, empty=''))
def _replace_widget(self, old_widget, new_widget):
# retrieve the position, since we will replace two widgets later.
parent = old_widget.get_parent()
top = parent.child_get_property(old_widget, 'top-attach')
bottom = parent.child_get_property(old_widget, 'bottom-attach')
left = parent.child_get_property(old_widget, 'left-attach')
right = parent.child_get_property(old_widget, 'right-attach')
parent.remove(old_widget)
parent.attach(new_widget, left, right, top, bottom)
def _get_client(self):
client_id = self.client.read()
return self.store.get(Client, client_id)
#
# WizardStep hooks
#
def post_init(self):
self.register_validate_function(self.wizard.refresh_next)
self.force_validation()
def next_step(self):
return LoanItemStep(self.wizard, self, self.store, self.model)
def has_previous_step(self):
return False
def setup_proxies(self):
self._setup_widgets()
self.proxy = self.add_proxy(self.model,
StartNewLoanStep.proxy_widgets)
#
# Callbacks
#
def on_client__changed(self, widget):
client = self._get_client()
if not client:
return
self.client_category.select(client.category)
def on_expire_date__validate(self, widget, value):
if value < localtoday().date():
msg = _(u"The expire date must be set to today or a future date.")
return ValidationError(msg)
def on_observations_button__clicked(self, *args):
run_dialog(NoteEditor, self.wizard, self.store, self.model, 'notes',
title=_("Additional Information"))
class LoanItemStep(SaleQuoteItemStep):
""" Wizard step for loan items selection """
model_type = Loan
item_table = LoanItem
sellable_view = ProductWithStockBranchView
item_editor = LoanItemEditor
validate_stock = True
batch_selection_dialog = BatchDecreaseSelectionDialog
def get_sellable_view_query(self):
branch = self.model.branch
# Also include products that are not storable
branch_query = Or(self.sellable_view.branch_id == branch.id,
Eq(self.sellable_view.branch_id, None))
# The stock quantity of consigned products can not be
# decreased manually. See bug 5212.
query = And(branch_query,
Sellable.get_available_sellables_query(self.store))
return self.sellable_view, query
def has_next_step(self):
return False
class LoanSelectionStep(BaseWizardStep):
gladefile = 'HolderTemplate'
def __init__(self, wizard, store):
BaseWizardStep.__init__(self, store, wizard)
self.setup_slaves()
def _create_filters(self):
self.search.set_text_field_columns(['client_name', 'identifier_str'])
def _get_columns(self):
return [IdentifierColumn('identifier', title=_('Loan #'), sorted=True),
SearchColumn('responsible_name', title=_(u'Responsible'),
data_type=str, expand=True),
SearchColumn('client_name', title=_(u'Client'),
data_type=str, expand=True),
SearchColumn('open_date', title=_(u'Opened'),
data_type=datetime.date),
SearchColumn('expire_date', title=_(u'Expire'),
data_type=datetime.date),
Column('loaned', title=_(u'Loaned'),
data_type=Decimal),
]
def _refresh_next(self, value=None):
can_continue = False
selected_rows = self.search.results.get_selected_rows()
if selected_rows:
client = selected_rows[0].client_id
branch = selected_rows[0].branch_id
# Only loans that belong to the same client and are from the same
# branch can be closed together
can_continue = all(v.client_id == client and v.branch_id == branch
for v in selected_rows)
self.wizard.refresh_next(can_continue)
def get_extra_query(self, states):
return LoanView.status == Loan.STATUS_OPEN
def setup_slaves(self):
self.search = SearchSlave(self._get_columns(),
restore_name=self.__class__.__name__,
store=self.store,
search_spec=LoanView)
self.search.enable_advanced_search()
self.attach_slave('place_holder', self.search)
executer = self.search.get_query_executer()
executer.add_query_callback(self.get_extra_query)
self._create_filters()
self.search.results.connect('selection-changed',
self._on_results_selection_changed)
self.search.results.set_selection_mode(gtk.SELECTION_MULTIPLE)
self.search.focus_search_entry()
#
# WizardStep
#
def has_previous_step(self):
return False
def post_init(self):
self.register_validate_function(self._refresh_next)
self.force_validation()
def next_step(self):
# FIXME: For some reason, the loan isn't in self.store
views = self.search.results.get_selected_rows()
self.wizard.models = [self.store.fetch(v.loan) for v in views]
return LoanItemSelectionStep(self.wizard, self, self.store,
self.wizard.models)
#
# Callbacks
#
def _on_results_selection_changed(self, widget, selection):
self._refresh_next()
class LoanItemSelectionStep(SellableItemStep):
model_type = list
item_table = LoanItem
cost_editable = False
summary_label_column = None
def __init__(self, wizard, previous, store, model):
super(LoanItemSelectionStep, self).__init__(wizard, previous,
store, model)
for loan in model:
for item in loan.loaned_items:
self.wizard.original_items[item] = Settable(
quantity=item.quantity,
sale_quantity=item.sale_quantity,
return_quantity=item.return_quantity,
remaining_quantity=item.get_remaining_quantity(),
)
LoanItemSelectionStepEvent.emit(self)
#
# SellableItemStep
#
def has_next_step(self):
return False
def post_init(self):
super(LoanItemSelectionStep, self).post_init()
self.hide_add_button()
self.hide_edit_button()
self.hide_del_button()
self.hide_item_addition_toolbar()
self.slave.klist.connect('cell-edited', self._on_klist__cell_edited)
self.slave.klist.connect('cell-editing-started',
self._on_klist__cell_editing_started)
self.force_validation()
def get_columns(self):
adjustment = gtk.Adjustment(lower=0, upper=MAX_INT, step_incr=1)
return [
Column('sellable.code', title=_('Code'),
data_type=str, visible=False),
Column('sellable.barcode', title=_('Barcode'),
data_type=str, visible=False),
Column('sellable.description', title=_('Description'),
data_type=str, expand=True),
Column('quantity', title=_('Loaned'),
data_type=Decimal, format_func=format_quantity),
Column('sale_quantity', title=_('Sold'),
data_type=Decimal, format_func=format_quantity,
editable=True, spin_adjustment=adjustment),
Column('return_quantity', title=_('Returned'),
data_type=Decimal, format_func=format_quantity,
editable=True, spin_adjustment=adjustment),
Column('remaining_quantity', title=_('Remaining'),
data_type=Decimal, format_func=format_quantity),
Column('price', title=_('Price'), data_type=currency),
]
def get_saved_items(self):
for loan in self.model:
for item in loan.loaned_items:
yield item
def validate_step(self):
any_changed = False
has_sale_items = False
for item in self.get_saved_items():
original = self.wizard.original_items[item]
sale_quantity = item.sale_quantity - original.sale_quantity
if sale_quantity > 0:
has_sale_items = True
if item.get_remaining_quantity() < original.remaining_quantity:
any_changed = True
# Should not happen!
assert (item.sale_quantity >= original.sale_quantity or
item.return_quantity >= original.return_quantity)
assert item.quantity >= item.sale_quantity + item.return_quantity
if self.wizard.require_sale_items and not has_sale_items:
return False
# Don't let user finish if he didn't mark anything to return/sale
return any_changed
def validate(self, value):
super(LoanItemSelectionStep, self).validate(value)
self.wizard.refresh_next(value and self.validate_step())
#
# Callbacks
#
def _on_klist__cell_edited(self, klist, obj, attr):
# FIXME: Even with the adjustment, the user still can type
# values out of range with the keyboard. Maybe it's kiwi's fault
if attr in ['sale_quantity', 'return_quantity']:
value = getattr(obj, attr)
lower_value = getattr(self.wizard.original_items[obj], attr)
if value < lower_value:
setattr(obj, attr, lower_value)
diff = obj.quantity - obj.return_quantity - obj.sale_quantity
if diff < 0:
setattr(obj, attr, value + diff)
self.force_validation()
def _on_klist__cell_editing_started(self, klist, obj, attr,
renderer, editable):
original_item = self.wizard.original_items[obj]
if attr == 'sale_quantity':
adjustment = editable.get_adjustment()
adjustment.set_lower(original_item.sale_quantity)
adjustment.set_upper(obj.quantity - obj.return_quantity)
if attr == 'return_quantity':
adjustment = editable.get_adjustment()
adjustment.set_lower(original_item.return_quantity)
adjustment.set_upper(obj.quantity - obj.sale_quantity)
#
# Main wizard
#
class NewLoanWizard(BaseWizard):
size = (775, 400)
help_section = 'loan'
def __init__(self, store, model=None):
title = self._get_title(model)
model = model or self._create_model(store)
if model.status != Loan.STATUS_OPEN:
raise ValueError('Invalid loan status. It should '
'be STATUS_OPEN')
first_step = StartNewLoanStep(store, self, model)
BaseWizard.__init__(self, store, first_step, model, title=title,
edit_mode=False)
def _get_title(self, model=None):
if not model:
return _('New Loan Wizard')
def _create_model(self, store):
loan = Loan(responsible=api.get_current_user(store),
branch=api.get_current_branch(store),
store=store)
# Temporarily save the client_category, so it works fine with
# SaleQuoteItemStep
loan.client_category = None
return loan
def _print_receipt(self, order):
# we can only print the receipt if the loan was confirmed.
if yesno(_('Would you like to print the receipt now?'),
gtk.RESPONSE_YES, _("Print receipt"), _("Don't print")):
print_report(LoanReceipt, order)
#
# WizardStep hooks
#
def finish(self):
missing = get_missing_items(self.model, self.store)
if missing:
run_dialog(MissingItemsDialog, self, self.model, missing)
return False
self.model.sync_stock()
self.retval = self.model
self.close()
NewLoanWizardFinishEvent.emit(self.model)
# Confirm before printing to avoid losing data if something breaks
self.store.confirm(self.retval)
self._print_receipt(self.model)
class CloseLoanWizard(BaseWizard):
size = (775, 400)
title = _(u'Close Loan Wizard')
help_section = 'loan'
def __init__(self, store, create_sale=True, require_sale_items=False):
"""
:param store: A database store
:param create_sale: If a sale should be created for all the items that
will be sold from this loan.
:param require_sale_items: If there should be at least one item sold in
the Loan. If ``False``, a loan with only returned items will be allowed
to be confirmed. When ``True``, there should be at least one item in
the loan that will be sold before confirming this wizard.
"""
self._create_sale = create_sale
self._sold_items = []
self.original_items = {}
self.require_sale_items = require_sale_items
first_step = LoanSelectionStep(self, store)
BaseWizard.__init__(self, store, first_step, model=None,
title=self.title, edit_mode=False)
#
# Public API
#
def get_sold_items(self):
"""Get items set to be sold on this wizard
Returns a list of sold |sellables|, the quantity sold of those
|sellables| and the price it was sold at.
:returns: a list of tuples (|sellable|, quantity, price)
"""
return self._sold_items
#
# WizardStep hooks
#
def finish(self):
for loan in self.models:
for item in loan.loaned_items:
original = self.original_items[item]
sale_quantity = item.sale_quantity - original.sale_quantity
if sale_quantity > 0:
self._sold_items.append(
(item.sellable, sale_quantity, item.price))
if self._create_sale and self._sold_items:
user = api.get_current_user(self.store)
sale = Sale(
store=self.store,
# Even if there is more than one loan, they are always from the
# same (client, branch)
branch=self.models[0].branch,
client=self.models[0].client,
salesperson=user.person.sales_person,
group=PaymentGroup(store=self.store),
coupon_id=None)
for sellable, quantity, price in self._sold_items:
sale.add_sellable(sellable, quantity, price,
# Quantity was already decreased on loan
quantity_decreased=quantity)
sale.order()
info(_("Close loan details..."),
_("A sale was created from loan items. You can confirm "
"that sale in the Till application later."))
else:
sale = None
for model in self.models:
model.sync_stock()
if model.can_close():
model.close()
self.retval = self.models
self.close()
CloseLoanWizardFinishEvent.emit(self.models, sale, self)
def test(): # pragma nocover
creator = api.prepare_test()
run_dialog(CloseLoanWizard, None, creator.store, create_sale=True)
creator.store.rollback()
#creator.store.confirm(retval)
if __name__ == '__main__': # pragma nocover
test()
|
andrebellafronte/stoq
|
stoqlib/gui/wizards/loanwizard.py
|
Python
|
gpl-2.0
| 20,757
|
[
"VisIt"
] |
4276fc0b15a3abcfb97caa035dbe9e402e8b923427586b401ffc901d7538f590
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from espressomd.interactions import FeneBond
from time import time
from espressomd.accumulators import Correlator
from espressomd.observables import ParticleVelocities, ParticleBodyAngularVelocities
@ut.skipIf(espressomd.has_features("THERMOSTAT_IGNORE_NON_VIRTUAL"),
"Skipped because of THERMOSTAT_IGNORE_NON_VIRTUAL")
class LangevinThermostat(ut.TestCase):
"""Tests the velocity distribution created by the Langevin thermostat against
the single component Maxwell distribution."""
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.cell_system.set_n_square()
s.cell_system.skin = 0.3
s.seed = range(s.cell_system.get_state()["n_nodes"])
if espressomd.has_features("PARTIAL_PERIODIC"):
s.periodicity = 0,0,0
@classmethod
def setUpClass(cls):
np.random.seed(42)
def single_component_maxwell(self, x1, x2, kT):
"""Integrate the probability density from x1 to x2 using the trapez rule"""
x = np.linspace(x1, x2, 1000)
return np.trapz(np.exp(-x**2 / (2. * kT)), x) / \
np.sqrt(2. * np.pi * kT)
def check_velocity_distribution(self, vel, minmax, n_bins, error_tol, kT):
"""check the recorded particle distributions in vel againsta histogram with n_bins bins. Drop velocities outside minmax. Check individual histogram bins up to an accuracy of error_tol agaisnt the analytical result for kT."""
for i in range(3):
hist = np.histogram(
vel[:, i], range=(-minmax, minmax), bins=n_bins, normed=False)
data = hist[0] / float(vel.shape[0])
bins = hist[1]
for j in range(n_bins):
found = data[j]
expected = self.single_component_maxwell(
bins[j], bins[j + 1], kT)
self.assertLessEqual(abs(found - expected), error_tol)
def test_aa_verify_single_component_maxwell(self):
"""Verifies the normalization of the analytical expression."""
self.assertLessEqual(
abs(self.single_component_maxwell(-10, 10, 4.) - 1.), 1E-4)
def test_global_langevin(self):
"""Test for global Langevin parameters."""
N = 200
s = self.s
s.part.clear()
s.time_step = 0.04
# Place particles
s.part.add(pos=np.random.random((N, 3)))
# Enable rotation if compiled in
if espressomd.has_features("ROTATION"):
s.part[:].rotation = 1,1,1
kT = 2.3
gamma = 1.5
s.thermostat.set_langevin(kT=kT, gamma=gamma)
# Warmup
s.integrator.run(100)
# Sampling
loops = 4000
v_stored = np.zeros((N * loops, 3))
omega_stored = np.zeros((N * loops, 3))
for i in range(loops):
s.integrator.run(1)
v_stored[i * N:(i + 1) * N, :] = s.part[:].v
if espressomd.has_features("ROTATION"):
omega_stored[i * N:(i + 1) * N, :] = s.part[:].omega_body
v_minmax = 5
bins = 5
error_tol = 0.015
self.check_velocity_distribution(
v_stored, v_minmax, bins, error_tol, kT)
if espressomd.has_features("ROTATION"):
self.check_velocity_distribution(
omega_stored, v_minmax, bins, error_tol, kT)
@ut.skipIf(not espressomd.has_features("LANGEVIN_PER_PARTICLE"),
"Test requires LANGEVIN_PER_PARTICLE")
def test_langevin_per_particle(self):
"""Test for Langevin particle. Covers all combinations of
particle specific gamma and temp set or not set.
"""
N = 200
s = self.s
s.part.clear()
s.time_step = 0.04
s.part.add(pos=np.random.random((N, 3)))
if espressomd.has_features("ROTATION"):
s.part[:].rotation = 1,1,1
kT = 2.3
gamma = 1.5
gamma2 = 2.3
kT2 = 1.5
s.thermostat.set_langevin(kT=kT, gamma=gamma)
# Set different kT on 2nd half of particles
s.part[int(N / 2):].temp = kT2
        # Set different gamma on half of the particles (overlapping both kTs)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
s.part[int(N / 4):int(3 * N / 4)].gamma = gamma2, gamma2, gamma2
else:
s.part[int(N / 4):int(3 * N / 4)].gamma = gamma2
s.integrator.run(50)
loops = 4000
v_kT = np.zeros((int(N / 2) * loops, 3))
v_kT2 = np.zeros((int(N / 2 * loops), 3))
if espressomd.has_features("ROTATION"):
omega_kT = np.zeros((int(N / 2) * loops, 3))
omega_kT2 = np.zeros((int(N / 2 * loops), 3))
for i in range(loops):
s.integrator.run(1)
v_kT[int(i * N / 2):int((i + 1) * N / 2),
:] = s.part[:int(N / 2)].v
v_kT2[int(i * N / 2):int((i + 1) * N / 2),
:] = s.part[int(N / 2):].v
if espressomd.has_features("ROTATION"):
omega_kT[int(i * N / 2):int((i + 1) * N / 2),
:] = s.part[:int(N / 2)].omega_body
omega_kT2[int(i * N / 2):int((i + 1) * N / 2),
:] = s.part[int(N / 2):].omega_body
v_minmax = 5
bins = 5
error_tol = 0.014
self.check_velocity_distribution(v_kT, v_minmax, bins, error_tol, kT)
self.check_velocity_distribution(v_kT2, v_minmax, bins, error_tol, kT2)
if espressomd.has_features("ROTATION"):
self.check_velocity_distribution(omega_kT, v_minmax, bins, error_tol, kT)
self.check_velocity_distribution(omega_kT2, v_minmax, bins, error_tol, kT2)
def setup_diff_mass_rinertia(self,p):
if espressomd.has_features("MASS"):
p.mass=0.5
if espressomd.has_features("ROTATION"):
p.rotation = 1,1,1
# Make sure rinertia does not change diff coeff
if espressomd.has_features("ROTATIONAL_INERTIA"):
p.rinertia =0.4,0.4,0.4
def test_diffusion(self):
"""This tests rotational and translational diffusion coeff via green-kubo"""
s=self.s
s.part.clear()
kT=1.37
dt=0.1
s.time_step=dt
# Translational gamma. We cannot test per-component, if rotation is on,
# because body and space frames become different.
gamma=3.1
# Rotational gamma
gamma_rot_i=0.7
gamma_rot_a=0.7,1,1.2
# If we have langevin per particle:
# per particle kT
per_part_kT=1.6
# Translation
per_part_gamma=1.63
# Rotational
per_part_gamma_rot_i=0.6
per_part_gamma_rot_a=0.4,0.8,1.1
# Particle with global thermostat params
p_global=s.part.add(pos=(0,0,0))
        # Make sure mass doesn't change the diffusion coefficient
self.setup_diff_mass_rinertia(p_global)
# particle specific gamma, kT, and both
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
p_gamma=s.part.add(pos=(0,0,0))
self.setup_diff_mass_rinertia(p_gamma)
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p_gamma.gamma =per_part_gamma,per_part_gamma,per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot=per_part_gamma_rot_a
else:
p_gamma.gamma =per_part_gamma
if espressomd.has_features("ROTATION"):
p_gamma.gamma_rot=per_part_gamma_rot_i
p_kT=s.part.add(pos=(0,0,0))
self.setup_diff_mass_rinertia(p_kT)
p_kT.temp=per_part_kT
p_both=s.part.add(pos=(0,0,0))
self.setup_diff_mass_rinertia(p_both)
p_both.temp=per_part_kT
if espressomd.has_features("PARTICLE_ANISOTROPY"):
p_both.gamma =per_part_gamma,per_part_gamma,per_part_gamma
if espressomd.has_features("ROTATION"):
p_both.gamma_rot=per_part_gamma_rot_a
else:
p_both.gamma =per_part_gamma
if espressomd.has_features("ROTATION"):
p_both.gamma_rot=per_part_gamma_rot_i
        # Thermostat setup
        if espressomd.has_features("ROTATION"):
            if espressomd.has_features("PARTICLE_ANISOTROPY"):
                # Particle anisotropy and rotation
                s.thermostat.set_langevin(kT=kT, gamma=gamma, gamma_rotation=gamma_rot_a)
            else:
                # Rotation without particle anisotropy
                s.thermostat.set_langevin(kT=kT, gamma=gamma, gamma_rotation=gamma_rot_i)
        else:
            # No rotation
            s.thermostat.set_langevin(kT=kT, gamma=gamma)
        s.cell_system.skin = 0.4
s.integrator.run(5000)
        # Correlators
        vel_obs = {}
        omega_obs = {}
        corr_vel = {}
        corr_omega = {}
        all_particles = [p_global]
        if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
            all_particles.append(p_gamma)
            all_particles.append(p_kT)
            all_particles.append(p_both)
        # Linear velocity
        vel_obs = ParticleVelocities(ids=s.part[:].id)
        corr_vel = Correlator(obs1=vel_obs, tau_lin=20, tau_max=1.9, delta_N=1,
                              corr_operation="componentwise_product", compress1="discard1")
        s.auto_update_accumulators.add(corr_vel)
        # Angular velocity
        if espressomd.has_features("ROTATION"):
            omega_obs = ParticleBodyAngularVelocities(ids=s.part[:].id)
            corr_omega = Correlator(obs1=omega_obs, tau_lin=40, tau_max=3.9, delta_N=1,
                                    corr_operation="componentwise_product", compress1="discard1")
            s.auto_update_accumulators.add(corr_omega)
s.integrator.run(300000)
s.auto_update_accumulators.remove(corr_vel)
corr_vel.finalize()
if espressomd.has_features("ROTATION"):
s.auto_update_accumulators.remove(corr_omega)
corr_omega.finalize()
# Verify diffusion
# Translation
        # Cast gammas to vectors to make the checks independent of PARTICLE_ANISOTROPY
        gamma = np.ones(3) * gamma
        per_part_gamma = np.ones(3) * per_part_gamma
        self.verify_diffusion(p_global, corr_vel, kT, gamma)
        if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
            self.verify_diffusion(p_gamma, corr_vel, kT, per_part_gamma)
            self.verify_diffusion(p_kT, corr_vel, per_part_kT, gamma)
            self.verify_diffusion(p_both, corr_vel, per_part_kT, per_part_gamma)
# Rotation
if espressomd.has_features("ROTATION"):
# Decide on effective gamma rotation, since for rotation it is direction dependent
eff_gamma_rot=None
per_part_eff_gamma_rot=None
if espressomd.has_features("PARTICLE_ANISOTROPY"):
eff_gamma_rot=gamma_rot_a
eff_per_part_gamma_rot =per_part_gamma_rot_a
else:
eff_gamma_rot=gamma_rot_i*np.ones(3)
eff_per_part_gamma_rot =per_part_gamma_rot_i *np.ones(3)
self.verify_diffusion(p_global,corr_omega,kT,eff_gamma_rot)
if espressomd.has_features("LANGEVIN_PER_PARTICLE"):
self.verify_diffusion(p_gamma,corr_omega,kT,eff_per_part_gamma_rot)
self.verify_diffusion(p_kT,corr_omega,per_part_kT,eff_gamma_rot)
self.verify_diffusion(p_both,corr_omega,per_part_kT,eff_per_part_gamma_rot)
    def verify_diffusion(self, p, corr, kT, gamma):
        """Verify the diffusion coefficient.
        p: particle, corr: correlator holding the particle's velocity
        components, kT: temperature, gamma: friction as a 3-component vector.
        """
        c = corr
        # Integral of the VACF via Green-Kubo:
        # D = int_0^infty <v(t_0) v(t_0 + t)> dt
        # (no factor 1/3, since we work componentwise)
        i = p.id
        acf = c.result()[:, [0, 2 + 3 * i, 2 + 3 * i + 1, 2 + 3 * i + 2]]
        np.savetxt("acf.dat", acf)
        # Integrate with the trapezoidal rule
        for coord in 1, 2, 3:
            I = np.trapz(acf[:, coord], acf[:, 0])
            ratio = I / (kT / gamma[coord - 1])
            self.assertAlmostEqual(ratio, 1., delta=0.07)
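    # A minimal numerical sketch of the Green-Kubo check above (illustrative
    # only, not used by the tests): for a free Langevin particle the velocity
    # ACF per component is (kT / m) * exp(-gamma * t / m), and integrating it
    # over time recovers D = kT / gamma.
    @staticmethod
    def _green_kubo_sketch(kT=1.37, gamma=3.1, m=0.5):
        t = np.linspace(0, 50, 50000)
        acf = kT / m * np.exp(-gamma / m * t)
        return np.trapz(acf, t)  # approximately kT / gamma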
    def test_00__friction_trans(self):
        """Tests the translational friction-only part of the thermostat."""
        s = self.s
        # Translation
        gamma_t_i = 2
        gamma_t_a = 0.5, 2, 1.5
        v0 = 5.
        s.time_step = 0.0005
        s.part.clear()
        s.part.add(pos=(0, 0, 0), v=(v0, v0, v0))
        if espressomd.has_features("MASS"):
            s.part[0].mass = 3
        if espressomd.has_features("PARTICLE_ANISOTROPY"):
            s.thermostat.set_langevin(kT=0, gamma=gamma_t_a)
        else:
            s.thermostat.set_langevin(kT=0, gamma=gamma_t_i)
        s.time = 0
        for i in range(100):
            s.integrator.run(10)
            for j in range(3):
                if espressomd.has_features("PARTICLE_ANISOTROPY"):
                    self.assertAlmostEqual(
                        s.part[0].v[j],
                        v0 * np.exp(-gamma_t_a[j] / s.part[0].mass * s.time),
                        places=2)
                else:
                    self.assertAlmostEqual(
                        s.part[0].v[j],
                        v0 * np.exp(-gamma_t_i / s.part[0].mass * s.time),
                        places=2)
@ut.skipIf(not espressomd.has_features("ROTATION"), "Skipped for lack of ROTATION" )
def test_00__friction_rot(self):
"""Tests the rotational friction-only part of the thermostat."""
s=self.s
# Translation
gamma_t_i=2
gamma_t_a=0.5,2,1.5
gamma_r_i=3
gamma_r_a=1.5,0.7,1.2
o0=5.
s.time_step=0.0005
s.part.clear()
s.part.add(pos=(0,0,0),omega_body=(o0,o0,o0),rotation=(1,1,1))
if espressomd.has_features("ROTATIONAL_INERTIA"):
s.part[0].rinertia=2,2,2
if espressomd.has_features("PARTICLE_ANISOTROPY"):
s.thermostat.set_langevin(kT=0,gamma=gamma_t_a,gamma_rotation=gamma_r_a)
else:
s.thermostat.set_langevin(kT=0,gamma=gamma_t_i,gamma_rotation=gamma_r_i)
s.time=0
for i in range(100):
s.integrator.run(10)
if espressomd.has_features("ROTATIONAL_INERTIA"):
rinertia=s.part[0].rinertia
else:
rinertia=(1,1,1)
for j in range(3):
if espressomd.has_features("PARTICLE_ANISOTROPY"):
self.assertAlmostEqual(s.part[0].omega_body[j],o0*np.exp(-gamma_r_a[j]/rinertia[j]*s.time),places=2)
else:
self.assertAlmostEqual(s.part[0].omega_body[j],o0*np.exp(-gamma_r_i/rinertia[j]*s.time),places=2)
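# Illustrative sketch (not part of the test suite): the friction-only tests
# above compare against the analytic solution of the damped equation of
# motion m * dv/dt = -gamma * v, i.e. v(t) = v0 * exp(-gamma * t / m).
def _example_friction_decay(v0=5., gamma=2., mass=3., t=0.05):
    """Expected velocity after time t under pure (kT=0) Langevin friction."""
    return v0 * np.exp(-gamma / mass * t)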
if __name__ == "__main__":
ut.main()
|
KonradBreitsprecher/espresso
|
testsuite/langevin_thermostat.py
|
Python
|
gpl-3.0
| 15,754
|
[
"ESPResSo"
] |
aa4d77a156e64903bfaddfa39bbc490b7ce5f2f0f4ca272197ceda5ea015ea2c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
import desktop.views
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile, get_profile
from hadoop import pseudo_hdfs4
from hadoop.pseudo_hdfs4 import is_live_cluster
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import LdapTestConnection, reset_all_groups, reset_all_users
def test_useradmin_ldap_user_group_membership_sync():
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
    # Make an authenticated request as curly so that the synchronization middleware runs.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
    # Make an authenticated request as curly so that the synchronization middleware runs.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
    # The count remains 3; the LDAP group removal is not picked up by this request.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
def test_useradmin_ldap_suboordinate_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[1].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
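# Note on the reset pattern used throughout these tests: set_for_testing()
# returns a callable that restores the previous configuration value, so the
# finally blocks undo every override even when an assertion fails. A minimal
# sketch of the idiom:
#
#   reset = [desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested")]
#   try:
#     ...  # exercise code under the overridden setting
#   finally:
#     for finish in reset:
#       finish()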
def test_useradmin_ldap_nested_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
  # Test nested subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[1].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
    # Import all members of TestUsers, but not members of subgroups (even though recursive import is specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
    # Import all members of PosixGroup
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
    # Import all members of PosixGroup
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration():
done = []
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == 'larry@stooges.com')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, 'curly@stooges.com')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
finally:
for finish in done:
finish()
def test_useradmin_ldap_case_sensitivity():
if is_live_cluster():
raise SkipTest('HUE-2897: Cannot yet guarantee database is case sensitive')
done = []
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users():
done = []
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
    # Test a regular username with spaces (should fail)
response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("Could not get LDAP details for users in pattern" in response.content, response)
response = c.get(reverse(desktop.views.log_view))
assert_true("{username}: Username must not contain whitespaces".format(username='user with space') in response.content, response.content)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users_case_sensitivity():
if is_live_cluster():
raise SkipTest('HUE-2897: Cannot yet guarantee database is case sensitive')
done = []
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
assert_true(c.post(URL))
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
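# The same fault-injection pattern can target other lookups; a minimal sketch
# (the method name below is hypothetical and depends on LdapTestConnection):
#
#   class LdapTestConnectionGroupError(LdapTestConnection):
#     def find_groups(self, group, find_by_dn=False):
#       raise ldap.LDAPError('No such object')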
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
|
sanjeevtripurari/hue
|
apps/useradmin/src/useradmin/test_ldap_deprecated.py
|
Python
|
apache-2.0
| 33,296
|
[
"MOE"
] |
a24121088ffd23a9a32ea7d99edc60067856a19a6bfa4e17d352bc835c9aa9ee
|
from hellosign_sdk.utils import HSRequest, HSException, NoAuthMethod, HSAccessTokenAuth, HSFormat, api_resource, api_resource_list
from hellosign_sdk.resource import Account, ApiApp, SignatureRequest, Template, Team, Embedded, UnclaimedDraft
from requests.auth import HTTPBasicAuth
import json
#
# The MIT License (MIT)
#
# Copyright (C) 2014 hellosign.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class HSClient(object):
    ''' Client object to interact with the API urls
    Most of the operations of the SDK are performed through this object. Please
    refer to the README.rst file for more details on how to use the client
    object.
    '''
version = '4.0.0' # SDK version
API_VERSION = 'v3' # API version
API_URL = ''
ACCOUNT_CREATE_URL = ''
ACCOUNT_INFO_URL = ''
ACCOUNT_UPDATE_URL = ''
ACCOUNT_VERIFY_URL = ''
SIGNATURE_REQUEST_INFO_URL = ''
SIGNATURE_REQUEST_LIST_URL = ''
SIGNATURE_REQUEST_DOWNLOAD_PDF_URL = ''
SIGNATURE_REQUEST_CREATE_URL = ''
SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL = ''
SIGNATURE_REQUEST_REMIND_URL = ''
SIGNATURE_REQUEST_CANCEL_URL = ''
SIGNATURE_REQUEST_CREATE_EMBEDDED_URL = ''
SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL = ''
EMBEDDED_OBJECT_GET_URL = ''
EMBEDDED_TEMPLATE_EDIT_URL = ''
UNCLAIMED_DRAFT_CREATE_URL = ''
UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL = ''
UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL = ''
UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL = ''
TEMPLATE_GET_URL = ''
TEMPLATE_GET_LIST_URL = ''
TEMPLATE_GET_FILES_URL = ''
TEMPLATE_DELETE_URL = ''
TEMPLATE_ADD_USER_URL = ''
TEMPLATE_REMOVE_USER_URL = ''
TEMPLATE_CREATE_EMBEDDED_DRAFT_URL = ''
TEAM_INFO_URL = ''
TEAM_UPDATE_URL = ''
TEAM_CREATE_URL = ''
TEAM_DESTROY_URL = ''
TEAM_ADD_MEMBER_URL = ''
TEAM_REMOVE_MEMBER_URL = ''
API_APP_INFO_URL = ''
API_APP_LIST_URL = ''
API_APP_CREATE_URL = ''
API_APP_UPDATE_URL = ''
API_APP_DELETE_URL = ''
OAUTH_TOKEN_URL = ''
request = None
response_callback = None
    def __init__(self, email_address=None, password=None, api_key=None, access_token=None, access_token_type='Bearer', env='production'):
        '''Initialize the client object with authentication information to send requests
        Args:
            email_address (str): E-mail of the account to make the requests
            password (str): Password of the account used with email address
            api_key (str): API Key. You can find your API key in https://app.hellosign.com/home/myAccount/current_tab/integrations#api
            access_token (str): OAuth access token to use
            access_token_type (str): Type of OAuth token (defaults to Bearer, which is the only value supported for now)
            env (str): API environment to use: 'production' (default), 'dev' or 'staging'
        '''
super(HSClient, self).__init__()
self.auth = self._authenticate(email_address, password, api_key, access_token, access_token_type)
self.account = Account()
self.env = env
self._init_endpoints()
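    # Illustrative usage (the API key below is a placeholder):
    #
    #   client = HSClient(api_key='YOUR_API_KEY')
    #   account = client.get_account_info()
    #   print(account.email_address)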
def __str__(self):
''' Return a string description of this object '''
return "HelloSign Client %s" % self.version
def _init_endpoints(self):
API_PRODUCTION_URL = "https://api.hellosign.com"
API_DEV_URL = "https://api.dev-hellosign.com"
API_STAGING_URL = "https://api.staging-hellosign.com"
WEB_PRODUCTION_URL = "https://app.hellosign.com"
WEB_DEV_URL = "https://app.dev-hellosign.com"
WEB_STAGING_URL = "https://app.staging-hellosign.com"
if self.env == "production":
self.API_URL = API_PRODUCTION_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_PRODUCTION_URL + '/oauth/token'
elif self.env == "dev":
self.API_URL = API_DEV_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_DEV_URL + '/oauth/token'
print("WARNING: Using dev api endpoint %s" % self.API_URL)
elif self.env == "staging":
self.API_URL = API_STAGING_URL + '/' + self.API_VERSION
self.OAUTH_TOKEN_URL = WEB_STAGING_URL + '/oauth/token'
print("WARNING: Using staging api endpoint %s" % self.API_URL)
self.ACCOUNT_CREATE_URL = self.API_URL + '/account/create'
self.ACCOUNT_INFO_URL = self.API_URL + '/account'
self.ACCOUNT_UPDATE_URL = self.API_URL + '/account'
self.ACCOUNT_VERIFY_URL = self.API_URL + '/account/verify'
self.SIGNATURE_REQUEST_INFO_URL = self.API_URL + '/signature_request/'
self.SIGNATURE_REQUEST_LIST_URL = self.API_URL + '/signature_request/list'
self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL = self.API_URL + '/signature_request/files/'
self.SIGNATURE_REQUEST_CREATE_URL = self.API_URL + '/signature_request/send'
self.SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL = self.API_URL + '/signature_request/send_with_template'
self.SIGNATURE_REQUEST_REMIND_URL = self.API_URL + '/signature_request/remind/'
self.SIGNATURE_REQUEST_UPDATE_URL = self.API_URL + '/signature_request/update/'
self.SIGNATURE_REQUEST_CANCEL_URL = self.API_URL + '/signature_request/cancel/'
self.SIGNATURE_REQUEST_REMOVE_ACCESS_URL = self.API_URL + '/signature_request/remove/'
self.SIGNATURE_REQUEST_CREATE_EMBEDDED_URL = self.API_URL + '/signature_request/create_embedded'
self.SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL = self.API_URL + '/signature_request/create_embedded_with_template'
self.EMBEDDED_OBJECT_GET_URL = self.API_URL + '/embedded/sign_url/'
self.EMBEDDED_TEMPLATE_EDIT_URL = self.API_URL + '/embedded/edit_url/'
self.UNCLAIMED_DRAFT_CREATE_URL = self.API_URL + '/unclaimed_draft/create'
self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL = self.API_URL + '/unclaimed_draft/create_embedded'
self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL = self.API_URL + '/unclaimed_draft/create_embedded_with_template'
self.UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL = self.API_URL + '/unclaimed_draft/edit_and_resend/'
self.TEMPLATE_GET_URL = self.API_URL + '/template/'
self.TEMPLATE_GET_LIST_URL = self.API_URL + '/template/list'
self.TEMPLATE_GET_FILES_URL = self.API_URL + '/template/files/'
self.TEMPLATE_DELETE_URL = self.API_URL + '/template/delete/'
self.TEMPLATE_ADD_USER_URL = self.API_URL + '/template/add_user/'
self.TEMPLATE_REMOVE_USER_URL = self.API_URL + '/template/remove_user/'
self.TEMPLATE_CREATE_EMBEDDED_DRAFT_URL = self.API_URL + '/template/create_embedded_draft'
self.TEMPLATE_UPDATE_FILES_URL = self.API_URL + '/template/update_files/'
self.TEAM_INFO_URL = self.API_URL + '/team'
self.TEAM_UPDATE_URL = self.TEAM_INFO_URL
self.TEAM_CREATE_URL = self.API_URL + '/team/create'
self.TEAM_DESTROY_URL = self.API_URL + '/team/destroy'
self.TEAM_ADD_MEMBER_URL = self.API_URL + '/team/add_member'
self.TEAM_REMOVE_MEMBER_URL = self.API_URL + '/team/remove_member'
self.API_APP_INFO_URL = self.API_URL + '/api_app/'
self.API_APP_LIST_URL = self.API_URL + '/api_app/list'
self.API_APP_CREATE_URL = self.API_URL + '/api_app'
self.API_APP_UPDATE_URL = self.API_APP_INFO_URL
self.API_APP_DELETE_URL = self.API_APP_INFO_URL
# ---- ACCOUNT METHODS -----------------------------
@api_resource(Account)
def create_account(self, email_address, password=None, client_id=None, client_secret=None):
''' Create a new account.
If the account is created via an app, then Account.oauth will contain the
OAuth data that can be used to execute actions on behalf of the newly created account.
Args:
email_address (str): Email address of the new account to create
password (str): [DEPRECATED] This parameter will be ignored
client_id (str, optional): Client id of the app to use to create this account
client_secret (str, optional): Secret of the app to use to create this account
Returns:
The new Account object
'''
request = self._get_request()
params = {
'email_address': email_address
}
if client_id:
params['client_id'] = client_id
params['client_secret'] = client_secret
response = request.post(self.ACCOUNT_CREATE_URL, params)
if 'oauth_data' in response:
response["account"]["oauth"] = response['oauth_data']
return response
# Get account info and put in self.account so that further access to the
# info can be made by using self.account.attribute
def get_account_info(self):
''' Get current account information
The information then will be saved in `self.account` so that you can
access the information like this:
>>> hsclient = HSClient()
>>> acct = hsclient.get_account_info()
        >>> print(acct.email_address)
Returns:
An Account object
'''
request = self._get_request()
response = request.get(self.ACCOUNT_INFO_URL)
self.account.json_data = response["account"]
return self.account
    # At the moment you can only update your callback_url
@api_resource(Account)
def update_account_info(self):
''' Update current account information
At the moment you can only update your callback_url.
Returns:
An Account object
'''
request = self._get_request()
return request.post(self.ACCOUNT_UPDATE_URL, {
'callback_url': self.account.callback_url
})
def verify_account(self, email_address):
''' Verify whether a HelloSign Account exists
Args:
            email_address (str): Email address of the account to verify
Returns:
True or False
'''
request = self._get_request()
resp = request.post(self.ACCOUNT_VERIFY_URL, {
'email_address': email_address
})
return ('account' in resp)
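    # Illustrative usage (hypothetical address):
    #
    #   exists = client.verify_account('someone@example.com')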
# ---- SIGNATURE REQUEST METHODS -------------------
@api_resource(SignatureRequest)
def get_signature_request(self, signature_request_id):
''' Get a signature request by its ID
Args:
signature_request_id (str): The id of the SignatureRequest to retrieve
Returns:
A SignatureRequest object
'''
request = self._get_request()
parameters = None
return request.get(self.SIGNATURE_REQUEST_INFO_URL + signature_request_id, parameters=parameters)
@api_resource_list(SignatureRequest)
def get_signature_request_list(self, page=1, page_size=None):
''' Get a list of SignatureRequest that you can access
This includes SignatureRequests you have sent as well as received, but
not ones that you have been CCed on.
Args:
page (int, optional): Which page number of the SignatureRequest list to return. Defaults to 1.
            page_size (int, optional): Number of SignatureRequests to return per
                page. Defaults to 20 when not specified.
Returns:
A ResourceList object
'''
request = self._get_request()
parameters = {
"page": page,
"page_size": page_size
}
return request.get(self.SIGNATURE_REQUEST_LIST_URL, parameters=parameters)
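    # Illustrative usage (assumes the returned ResourceList is iterable):
    #
    #   for sig_req in client.get_signature_request_list(page=1, page_size=10):
    #       print(sig_req.title)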
def get_signature_request_file(self, signature_request_id, path_or_file=None, file_type=None, filename=None, response_type=None):
''' Download the PDF copy of the current documents
Args:
signature_request_id (str): Id of the signature request
path_or_file (str or file): A writable File-like object or a full path to save the PDF file to.
filename (str): [DEPRECATED] Filename to save the PDF file to. This should be a full path.
file_type (str): Type of file to return. Either "pdf" for a single merged document or "zip"
for a collection of individual documents. Defaults to "pdf" if not specified.
response_type (str): File type of response to return. Either "url" to return a URL link to the file
or "data_uri" to return the file as a base64 encoded string. Only applicable to the "pdf" file_type.
Returns:
            A PDF file, a URL link to the file, or the file as a base64 encoded string
'''
request = self._get_request()
url = self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL + signature_request_id
if response_type == 'url':
url += '?get_url=1'
elif response_type == 'data_uri':
url += '?get_data_uri=1'
else:
if file_type:
url += '?file_type=%s' % file_type
return request.get_file(url, path_or_file or filename)
return request.get(url)
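    # Illustrative usage (the request id and path are placeholders):
    #
    #   client.get_signature_request_file('SIGNATURE_REQUEST_ID',
    #       path_or_file='/tmp/signed.pdf', file_type='pdf')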
def send_signature_request(self, test_mode=False, client_id=None, files=None, file_urls=None,
title=None, subject=None, message=None, signing_redirect_url=None,
signers=None, cc_email_addresses=None, form_fields_per_document=None,
use_text_tags=False, hide_text_tags=False, custom_fields=None,
metadata=None, allow_decline=False, allow_reassign=False, signing_options=None, attachments=None):
''' Creates and sends a new SignatureRequest with the submitted documents
Creates and sends a new SignatureRequest with the submitted documents.
If form_fields_per_document is not specified, a signature page will be
affixed where all signers will be required to add their signature,
signifying their agreement to all contained documents.
Args:
test_mode (bool, optional): Whether this is a test, the signature request will not be legally binding
if set to True. Defaults to False.
            client_id (str): Client id of the app to use. For non-embedded requests this can be used for white-labeling.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
title (str, optional): The title you want to assign to the SignatureRequest.
subject (str, optional): The subject in the email that will be sent to the signers.
message (str, optional): The custom message in the email that will be sent to the signers.
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
cc_email_addresses (list, optional): A list of email addresses that should be CC'd on the request.
form_fields_per_document (str or list of dict, optional): The signer components that should appear on the document, expressed as a serialized
JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://app.hellosign.com/api/reference#SignatureRequest).
use_text_tags (bool, optional): Use text tags in the provided file(s) to specify signer components.
hide_text_tags (bool, optional): Hide text tag areas.
            custom_fields (list of dict, optional): A list of custom fields defined by Text Tags or Form Fields per Document.
                An item of the list should look like this: `{'name': value}`
metadata (dict, optional): Metadata associated with the signature request.
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
A SignatureRequest object
'''
self._check_required_fields({
"signers": signers
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'files': files,
'file_urls': file_urls,
'title': title,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'custom_fields': custom_fields,
'metadata': metadata,
'allow_decline': allow_decline,
'allow_reassign': allow_reassign,
'signing_options': signing_options,
'attachments': attachments
}
return self._send_signature_request(**params)
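    # Illustrative usage (all values are placeholders):
    #
    #   client.send_signature_request(
    #       test_mode=True,
    #       files=['nda.pdf'],
    #       title='NDA',
    #       subject='Please sign this NDA',
    #       signers=[{'name': 'Alice', 'email_address': 'alice@example.com'}])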
def send_signature_request_with_template(self, test_mode=False, template_id=None,
template_ids=None, title=None, subject=None, message=None,
signing_redirect_url=None, signers=None, ccs=None, custom_fields=None,
metadata=None, allow_decline=False, files=None, file_urls=None, signing_options=None):
''' Creates and sends a new SignatureRequest based off of a Template
Creates and sends a new SignatureRequest based off of the Template
specified with the template_id parameter.
Args:
test_mode (bool, optional): Whether this is a test, the signature request will not be legally binding if set to True. Defaults to False.
template_id (str): The id of the Template to use when creating the SignatureRequest. Mutually exclusive with template_ids.
template_ids (list): The ids of the Templates to use when creating the SignatureRequest. Mutually exclusive with template_id.
title (str, optional): The title you want to assign to the SignatureRequest
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
signers (list of dict): A list of signers, which each has the following attributes:
role_name (str): Signer role
name (str): The name of the signer
email_address (str): Email address of the signer
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
            ccs (list of dict, optional): The email addresses of the CCs filling the CC roles of the Template.
                Required when a CC role exists for the Template. Each dict has the following attributes:
role_name (str): CC role name
email_address (str): CC email address
            custom_fields (list of dict, optional): A list of custom fields.
                Required when a CustomField exists in the Template. An item of the list should look like this: `{'name': value}`
metadata (dict, optional): Metadata to associate with the signature request
            allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
files (list of str): The uploaded file(s) to append to the Signature Request.
file_urls (list of str): URLs of the file for HelloSign to download to append to the Signature Request.
Use either `files` or `file_urls`
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature.
Defaults to account settings.
Returns:
A SignatureRequest object
'''
self._check_required_fields({
"signers": signers
}, [{
"template_id": template_id,
"template_ids": template_ids,
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'template_id': template_id,
'template_ids': template_ids,
'title': title,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'ccs': ccs,
'custom_fields': custom_fields,
'metadata': metadata,
'allow_decline': allow_decline,
'files': files,
'file_urls': file_urls,
'signing_options': signing_options
}
return self._send_signature_request_with_template(**params)
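    # Illustrative usage (all values are placeholders):
    #
    #   client.send_signature_request_with_template(
    #       test_mode=True,
    #       template_id='TEMPLATE_ID',
    #       signers=[{'role_name': 'Client', 'name': 'Alice',
    #                 'email_address': 'alice@example.com'}])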
@api_resource(SignatureRequest)
def remind_signature_request(self, signature_request_id, email_address, name=None):
''' Sends an email to the signer reminding them to sign the signature request
        Sends an email to the signer reminding them to sign the signature
        request. You cannot send a reminder within 1 hour of the last reminder
        that was sent. This includes manual AND automatic reminders.
Args:
signature_request_id (str): The id of the SignatureRequest to send a reminder for
email_address (str): The email address of the signer to send a reminder to
name (str, optional): The name of the signer to send a reminder to
Returns:
A SignatureRequest object
'''
request = self._get_request()
return request.post(self.SIGNATURE_REQUEST_REMIND_URL + signature_request_id, data={
"email_address": email_address,
"name": name
})
@api_resource(SignatureRequest)
def update_signature_request(self, signature_request_id, signature_id, email_address):
''' Updates the email address for a given signer on a signature request.
Args:
signature_request_id (str): The id of the SignatureRequest to update
signature_id (str): The signature id for the recipient
email_address (str): The new email address of the recipient
Returns:
A SignatureRequest object
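Example:
    An illustrative call on an authenticated client (the ids and address
    are placeholders):
        client.update_signature_request(
            signature_request_id='SIGNATURE_REQUEST_ID',
            signature_id='SIGNATURE_ID',
            email_address='new.address@example.com')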
'''
request = self._get_request()
return request.post(self.SIGNATURE_REQUEST_UPDATE_URL + signature_request_id, data={
"signature_id": signature_id,
"email_address": email_address
})
def cancel_signature_request(self, signature_request_id):
''' Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signature_request_id (str): The id of the signature request to cancel
Returns:
None
'''
request = self._get_request()
request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False)
def remove_signature_request_access(self, signature_request_id):
''' Removes your access to a completed SignatureRequest
The SignatureRequest must be fully executed by all parties (signed or declined to sign).
Other parties will continue to maintain access to the completed signature request document(s).
Args:
signature_request_id (str): The id of the signature request to remove
Returns:
None
'''
request = self._get_request()
request.post(url=self.SIGNATURE_REQUEST_REMOVE_ACCESS_URL + signature_request_id, get_json=False)
def send_signature_request_embedded(self, test_mode=False, client_id=None,
files=None, file_urls=None, title=None, subject=None, message=None,
signing_redirect_url=None, signers=None, cc_email_addresses=None,
form_fields_per_document=None, use_text_tags=False, hide_text_tags=False,
metadata=None, allow_decline=False, allow_reassign=False, signing_options=None, attachments=None):
''' Creates and sends a new SignatureRequest with the submitted documents
Creates a new SignatureRequest with the submitted documents to be signed
in an embedded iFrame. If form_fields_per_document or text tags are not specified, a
signature page will be affixed where all signers will be required to add
their signature, signifying their agreement to all contained documents.
Note that embedded signature requests can only be signed in embedded
iFrames whereas normal signature requests can only be signed on
HelloSign.
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this embedded signature request.
Visit the embedded page to learn more about this parameter (https://www.hellosign.com/api/embeddedSigningWalkthrough)
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
title (str, optional): The title you want to assign to the SignatureRequest
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
cc_email_addresses (list, optional): A list of email addresses that should be CCed
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized
JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
metadata (dict, optional): Metadata to associate with the signature request
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments; see the signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
A SignatureRequest object
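Example:
    A minimal sketch on an authenticated client (illustrative only; the
    client id, file path and signer details are placeholders):
        req = client.send_signature_request_embedded(
            test_mode=True,
            client_id='YOUR_APP_CLIENT_ID',
            files=['nda.pdf'],
            signers=[{'name': 'Jane Doe',
                      'email_address': 'jane@example.com'}])
    The returned request is then signed through the embedded iFrame
    (see get_embedded_object for retrieving the sign url).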
'''
self._check_required_fields({
"signers": signers,
"client_id": client_id
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'files': files,
'file_urls': file_urls,
'title': title,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'metadata': metadata,
'allow_decline': allow_decline,
'allow_reassign': allow_reassign,
'signing_options': signing_options,
'is_for_embedded_signing': True,
'attachments': attachments
}
return self._send_signature_request(**params)
def send_signature_request_embedded_with_template(self, test_mode=False,
client_id=None, template_id=None, template_ids=None, title=None,
subject=None, message=None, signing_redirect_url=None, signers=None,
ccs=None, custom_fields=None, metadata=None, allow_decline=False,
files=None, file_urls=None, signing_options=None):
''' Creates and sends a new SignatureRequest based off of a Template
Creates a new SignatureRequest based on the given Template to be
signed in an embedded iFrame. Note that embedded signature requests can
only be signed in embedded iFrames whereas normal signature requests can
only be signed on HelloSign.
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this embedded signature request.
Visit the embedded page to learn more about this parameter (https://app.hellosign.com/api/embeddedSigningWalkthrough)
template_id (str): The id of the Template to use when creating the SignatureRequest. Mutually exclusive with template_ids.
template_ids (list): The ids of the Templates to use when creating the SignatureRequest. Mutually exclusive with template_id.
title (str, optional): The title you want to assign to the SignatureRequest
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
ccs (list of dict, optional): The email address of the CC filling the role of RoleName.
Required when a CC role exists for the Template. Each dict has the following attributes:
role_name (str): CC role name
email_address (str): CC email address
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template.
An item of the list should look like this: `{'name': 'value'}`
metadata (dict, optional): Metadata to associate with the signature request
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
files (list of str): The uploaded file(s) to append to the Signature Request.
file_urls (list of str): URLs of the file for HelloSign to download to append to the Signature Request. Use either `files` or `file_urls`
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
Returns:
A SignatureRequest object
'''
self._check_required_fields({
"signers": signers,
"client_id": client_id
}, [{
"template_id": template_id,
"template_ids": template_ids,
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'template_id': template_id,
'template_ids': template_ids,
'title': title,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'ccs': ccs,
'custom_fields': custom_fields,
'metadata': metadata,
'allow_decline': allow_decline,
'files': files,
'file_urls': file_urls,
'signing_options': signing_options
}
return self._send_signature_request_with_template(**params)
# ---- TEMPLATE METHODS -----------------------
@api_resource(Template)
def get_template(self, template_id):
''' Gets a Template which includes a list of Accounts that can access it
Args:
template_id (str): The id of the template to retrieve
Returns:
A Template object
'''
request = self._get_request()
return request.get(self.TEMPLATE_GET_URL + template_id)
@api_resource_list(Template)
def get_template_list(self, page=1, page_size=None, account_id=None, query=None):
''' Lists your Templates
Args:
page (int, optional): Page number of the template List to return. Defaults to 1.
page_size (int, optional): Number of objects to be returned per page, must be between 1 and 100, default is 20.
account_id (str, optional): Which account to return Templates for. Must be a team member.
Use "all" to indicate all team members. Defaults to your account.
query (str, optional): String that includes search terms and/or fields to be used to filter the Template objects.
Returns:
A ResourceList object
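Example:
    An illustrative call on an authenticated client (assuming, as
    elsewhere in this SDK, that each returned Template exposes a
    template_id attribute):
        templates = client.get_template_list(page=1, page_size=20)
        for template in templates:
            print(template.template_id)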
'''
request = self._get_request()
parameters = {
'page': page,
'page_size': page_size,
'account_id': account_id,
'query': query
}
return request.get(self.TEMPLATE_GET_LIST_URL, parameters=parameters)
# RECOMMEND: this api does not fail if the user has already been added...
def add_user_to_template(self, template_id, account_id=None, email_address=None):
''' Gives the specified Account access to the specified Template
Args:
template_id (str): The id of the template to give the account access to
account_id (str): The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.
email_address (str): The email address of the account to give access to.
Returns:
A Template object
'''
return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)
def remove_user_from_template(self, template_id, account_id=None, email_address=None):
''' Removes the specified Account's access to the specified Template
Args:
template_id (str): The id of the template to remove the account's access from.
account_id (str): The id of the account to remove access from the template.
The account id prevails if both account_id and email_address are provided.
email_address (str): The email address of the account to remove access from.
Returns:
A Template object
'''
return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)
def delete_template(self, template_id):
''' Deletes the specified template
Args:
template_id (str): The id of the template to delete
Returns:
A status code
'''
url = self.TEMPLATE_DELETE_URL
request = self._get_request()
response = request.post(url + template_id, get_json=False)
return response
def get_template_files(self, template_id, path_or_file=None, file_type=None,
filename=None, response_type=None):
''' Downloads a copy of a template's original files
Args:
template_id (str): id of the template to download
path_or_file (str or file): A writable File-like object or a full path to save the PDF file to.
filename (str): [DEPRECATED] Filename to save the PDF file to. This should be a full path.
file_type (str): Type of file to return. Either "pdf" for a single merged document or
"zip" for a collection of individual documents. Defaults to "pdf" if not specified.
response_type (str): File type of response to return. Either "url" to return a URL link to the file
or "data_uri" to return the file as a base64 encoded string. Only applicable to the "pdf" file_type.
Returns:
Returns a PDF file, URL link to file, or base64 encoded file
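Example:
    An illustrative call on an authenticated client, saving the merged
    PDF to disk (the template id and path are placeholders):
        client.get_template_files('TEMPLATE_ID',
                                  path_or_file='template.pdf',
                                  file_type='pdf')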
'''
request = self._get_request()
url = self.TEMPLATE_GET_FILES_URL + template_id
if file_type:
url += '?file_type=%s' % file_type
return request.get_file(url, path_or_file or filename)
if response_type == 'url':
url += '?get_url=1'
elif response_type == 'data_uri':
url += '?get_data_uri=1'
return request.get(url)
def update_template_files(self, template_id, files=None, file_urls=None,
subject=None, message=None, client_id=None, test_mode=False):
''' Overlays a new file with the overlay (fields and layout) of an existing template.
Args:
template_id (str): The id of the template whose files to update
files (list of str): The file(s) to use for the template.
file_urls (list of str): URLs of the file for HelloSign to use for the template.
Use either `files` or `file_urls`, but not both.
subject (str, optional): The default template email subject
message (str, optional): The default template email message
test_mode (bool, optional): Whether this is a test; if True, signature requests
created from this Template will not be legally binding. Defaults to False.
client_id (str): Client id of the app associated with the Template
Returns:
A Template object
'''
request = self._get_request()
return request.post(self.TEMPLATE_UPDATE_FILES_URL + template_id, data={
"files": files,
"file_urls": file_urls,
"subject": subject,
"message": message,
"test_mode": self._boolean(test_mode),
"client_id": client_id
})
def create_embedded_template_draft(self, client_id, signer_roles, test_mode=False,
files=None, file_urls=None, title=None, subject=None, message=None,
cc_roles=None, merge_fields=None, skip_me_now=False, use_preexisting_fields=False,
allow_reassign=False, metadata=None, allow_ccs=False, attachments=None):
''' Creates an embedded Template draft for further editing.
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request
created from this draft will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this draft.
files (list of str): The file(s) to use for the template.
file_urls (list of str): URLs of the file for HelloSign to use for the template.
Use either `files` or `file_urls`, but not both.
title (str, optional): The template title
subject (str, optional): The default template email subject
message (str, optional): The default template email message
signer_roles (list of dict): A list of signer roles, each of which has the following attributes:
name (str): The role name of the signer that will be displayed when the
template is used to create a signature request.
order (str, optional): The order in which this signer role is required to sign.
cc_roles (list of str, optional): The CC roles that must be assigned when using the template to send a signature request
merge_fields (list of dict, optional): The merge fields that can be placed on the template's
document(s) by the user claiming the template draft. Each must have the following two parameters:
name (str): The name of the merge field. Must be unique.
type (str): Can only be "text" or "checkbox".
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to False.
use_preexisting_fields (bool, optional): Whether to use preexisting PDF fields
metadata (dict, optional): Metadata to associate with the draft
allow_reassign (bool, optional): Allows signers to reassign their signature
requests to other signers if set to True. Defaults to False.
allow_ccs (bool, optional): Specifies whether the user is allowed to
provide email addresses to CC when creating a template. Defaults to False.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
A Template object specifying the id of the draft
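Example:
    A minimal sketch on an authenticated client (illustrative only; the
    client id, file path, role and merge field names are placeholders):
        draft = client.create_embedded_template_draft(
            client_id='YOUR_APP_CLIENT_ID',
            signer_roles=[{'name': 'Client', 'order': '0'}],
            test_mode=True,
            files=['contract.pdf'],
            title='Contract template',
            merge_fields=[{'name': 'FullName', 'type': 'text'}])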
'''
params = {
'test_mode': test_mode,
'client_id': client_id,
'files': files,
'file_urls': file_urls,
'title': title,
'subject': subject,
'message': message,
'signer_roles': signer_roles,
'cc_roles': cc_roles,
'merge_fields': merge_fields,
'skip_me_now': skip_me_now,
'use_preexisting_fields': use_preexisting_fields,
'metadata': metadata,
'allow_reassign': allow_reassign,
'allow_ccs': allow_ccs,
'attachments': attachments
}
return self._create_embedded_template_draft(**params)
# ---- TEAM METHODS --------------------------------
@api_resource(Team)
def get_team_info(self):
''' Gets your Team and a list of its members
Returns information about your team as well as a list of its members.
If you do not belong to a team, a 404 error with an error_name of
"not_found" will be returned.
Returns:
A Team object
'''
request = self._get_request()
return request.get(self.TEAM_INFO_URL)
@api_resource(Team)
def create_team(self, name):
''' Creates a new Team
Creates a new Team and makes you a member. You must not currently belong to a team to invoke this method.
Args:
name (str): The name of your team
Returns:
A Team object
'''
request = self._get_request()
return request.post(self.TEAM_CREATE_URL, {"name": name})
# RECOMMEND: the api will even create a new team if you do not belong to any team
@api_resource(Team)
def update_team_name(self, name):
''' Updates a Team's name
Args:
name (str): The new name of your team
Returns:
A Team object
'''
request = self._get_request()
return request.post(self.TEAM_UPDATE_URL, {"name": name})
def destroy_team(self):
''' Delete your Team
Deletes your Team. Can only be invoked when you have a team with only one member left (yourself).
Returns:
None
'''
request = self._get_request()
request.post(url=self.TEAM_DESTROY_URL, get_json=False)
def add_team_member(self, account_id=None, email_address=None):
''' Add or invite a user to your Team
Args:
account_id (str): The id of the account of the user to invite to your team.
email_address (str): The email address of the account to invite to your team.
The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_ADD_MEMBER_URL, email_address, account_id)
# RECOMMEND: does not fail if the user has already been removed
def remove_team_member(self, account_id=None, email_address=None):
''' Remove a user from your Team
Args:
account_id (str): The id of the account of the user to remove from your team.
email_address (str): The email address of the account to remove from your team.
The account id prevails if both account_id and email_address are provided.
Returns:
A Team object
'''
return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)
# ---- EMBEDDED METHODS ----------------------------
@api_resource(Embedded)
def get_embedded_object(self, signature_id):
''' Retrieves an embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
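Example:
    An illustrative call on an authenticated client (the signature id is
    a placeholder; the signing url is assumed to be exposed as a
    sign_url attribute on the returned Embedded object):
        embedded = client.get_embedded_object('SIGNATURE_ID')
        iframe_url = embedded.sign_url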
'''
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)
@api_resource(Embedded)
def get_template_edit_url(self, template_id, test_mode=False, cc_roles=None,
merge_fields=None, skip_signer_roles=False, skip_subject_message=False):
''' Retrieves an embedded template for editing
Retrieves an embedded object containing a template edit url that can be opened in an iFrame.
Args:
template_id (str): The id of the template to get an edit url for
test_mode (bool, optional): Whether this is a test; if True, signature requests created
from this template will not be legally binding. Defaults to False.
cc_roles (list of str, optional): The CC roles that must be assigned when using
the template to send a signature request
merge_fields (list of dict, optional): The merge fields that can be placed on the template's document(s)
by the user claiming the template draft. Each must have the following two parameters:
name (str): The name of the merge field. Must be unique.
type (str): Can only be "text" or "checkbox".
skip_signer_roles (bool, optional): Disables the option to edit the template's
signer roles. Defaults to False.
skip_subject_message (bool, optional): Disables the option to edit the template's default
subject and message. Defaults to False.
Returns:
An Embedded object
'''
# Prep CCs
ccs_payload = HSFormat.format_param_list(cc_roles, 'cc_roles')
# Prep Merge Fields (only when provided, so we do not serialize None to a JSON "null")
merge_fields_payload = {
'merge_fields': json.dumps(merge_fields)
} if merge_fields else {}
payload = {
"test_mode": self._boolean(test_mode),
"skip_signer_roles": self._boolean(skip_signer_roles),
"skip_subject_message": self._boolean(skip_subject_message)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
url = self.EMBEDDED_TEMPLATE_EDIT_URL + template_id
data = {}
data.update(payload)
data.update(ccs_payload)
data.update(merge_fields_payload)
request = self._get_request()
response = request.post(url, data=data)
return response
# ---- API APP METHODS --------------------------------
@api_resource(ApiApp)
def get_api_app_info(self, client_id):
''' Gets an API App by its Client ID
Returns information about the specified API App.
Args:
client_id (str): The client id of the API App to retrieve
Returns:
An ApiApp object
'''
request = self._get_request()
return request.get(self.API_APP_INFO_URL + client_id)
@api_resource_list(ApiApp)
def get_api_app_list(self, page=1, page_size=None):
''' Lists your API Apps
Args:
page (int, optional): Page number of the API App List to return. Defaults to 1.
page_size (int, optional): Number of objects to be returned per page, must be between 1 and 100, default is 20.
Returns:
A ResourceList object
'''
request = self._get_request()
parameters = {
'page': page,
'page_size': page_size
}
return request.get(self.API_APP_LIST_URL, parameters=parameters)
@api_resource(ApiApp)
def create_api_app(self, name, domain, callback_url=None, custom_logo_file=None,
oauth_callback_url=None, oauth_scopes=None, white_labeling_options=None,
option_insert_everywhere=False):
''' Creates a new API App
Creates a new API App with the specified settings.
Args:
name (str): The name of the API App
domain (str): The domain name associated with the API App
callback_url (str, optional): The URL that HelloSign events will be POSTed to
custom_logo_file (str, optional): The image file to use as a custom logo
oauth_callback_url (str, optional): The URL that HelloSign OAuth events will be POSTed to
oauth_scopes (list of str, optional): List of the API App's OAuth scopes
white_labeling_options (dict, optional): Customization options for the API App's signer page
option_insert_everywhere (bool, optional): Denotes if signers can "Insert Everywhere" when
signing a document
Returns:
An ApiApp object
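Example:
    A minimal sketch on an authenticated client (illustrative; the name,
    domain and callback url are placeholders):
        app = client.create_api_app(
            name='My Demo App',
            domain='example.com',
            callback_url='https://example.com/hellosign/events')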
'''
# Prep custom logo
custom_logo_payload = HSFormat.format_logo_params(custom_logo_file)
payload = {
"name": name,
"domain": domain,
"callback_url": callback_url,
"oauth[callback_url]": oauth_callback_url,
"oauth[scopes]": oauth_scopes,
"white_labeling_options": json.dumps(white_labeling_options),
"options[can_insert_everywhere]": self._boolean(option_insert_everywhere)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
request = self._get_request()
return request.post(self.API_APP_CREATE_URL, data=payload, files=custom_logo_payload)
@api_resource(ApiApp)
def update_api_app(self, client_id, name=None, domain=None, callback_url=None,
custom_logo_file=None, oauth_callback_url=None, oauth_scopes=None,
white_labeling_options=None, option_insert_everywhere=False):
''' Updates the specified API App
Updates an API App with the specified settings.
Args:
name (str): The name of the API App
domain (str): The domain name associated with the API App
callback_url (str, optional): The URL that HelloSign events will be POSTed to
custom_logo_file (str, optional): The image file to use as a custom logo
oauth_callback_url (str, optional): The URL that HelloSign OAuth events will be POSTed to
oauth_scopes (list of str, optional): List of the API App's OAuth scopes
white_labeling_options (dict, optional): Customization options for the API App's signer page
option_insert_everywhere (bool, optional): Denotes if signers can "Insert Everywhere" when
signing a document
Returns:
An ApiApp object
'''
# Prep custom logo
custom_logo_payload = HSFormat.format_logo_params(custom_logo_file)
payload = {
"name": name,
"domain": domain,
"callback_url": callback_url,
"oauth[callback_url]": oauth_callback_url,
"oauth[scopes]": oauth_scopes,
"white_labeling_options": json.dumps(white_labeling_options),
"options[can_insert_everywhere]": self._boolean(option_insert_everywhere)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
request = self._get_request()
url = self.API_APP_UPDATE_URL + client_id
return request.post(url, data=payload, files=custom_logo_payload)
def delete_api_app(self, client_id):
''' Deletes the specified API App
Deletes an API App. Can only be invoked for API Apps you own.
Returns:
None
'''
request = self._get_request()
request.delete(url=self.API_APP_DELETE_URL + client_id)
# ---- UNCLAIMED DRAFT METHODS ---------------------
def create_unclaimed_draft(self, test_mode=False, files=None, file_urls=None,
draft_type=None, subject=None, message=None, signers=None, custom_fields=None,
cc_email_addresses=None, signing_redirect_url=None, form_fields_per_document=None,
metadata=None, use_preexisting_fields=False, use_text_tags=False,
hide_text_tags=False, allow_decline=False, signing_options=None, attachments=None):
''' Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request created from this draft will not be legally binding. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature"
for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template.
An item of the list should look like this: `{'name': 'value'}`
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON
data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature.
Defaults to account settings.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
An UnclaimedDraft object
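Example:
    A minimal sketch on an authenticated client (illustrative; the file
    path and signer details are placeholders, and the claim url is
    assumed to be exposed as a claim_url attribute on the returned
    UnclaimedDraft):
        draft = client.create_unclaimed_draft(
            test_mode=True,
            files=['nda.pdf'],
            draft_type='request_signature',
            signers=[{'name': 'Jane Doe',
                      'email_address': 'jane@example.com'}])
        url = draft.claim_url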
'''
self._check_required_fields({
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'custom_fields': custom_fields,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'allow_decline': allow_decline,
'signing_options': signing_options,
'attachments': attachments
}
return self._create_unclaimed_draft(**params)
def create_embedded_unclaimed_draft(self, test_mode=False, client_id=None,
is_for_embedded_signing=False, requester_email_address=None, files=None,
file_urls=None, draft_type=None, subject=None, message=None, signers=None,
custom_fields=None, cc_email_addresses=None, signing_redirect_url=None,
requesting_redirect_url=None, form_fields_per_document=None, metadata=None,
use_preexisting_fields=False, use_text_tags=False, hide_text_tags=False,
skip_me_now=False, allow_decline=False, allow_reassign=False,
signing_options=None, allow_ccs=False, attachments=None):
''' Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request created from this draft will not be legally binding. Defaults to False.
client_id (str): Client id of the app used to create the embedded draft.
is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False.
requester_email_address (str): Email address of the requester.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists using text tags or form_fields_per_document. An item of the list should look like this: `{'name': 'value'}`
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
allow_ccs (bool, optional): Specifies whether the user is allowed to provide email addresses to CC when sending the request. Defaults to False.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
'client_id': client_id,
'requester_email_address': requester_email_address,
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'requester_email_address': requester_email_address,
'is_for_embedded_signing': is_for_embedded_signing,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'requesting_redirect_url': requesting_redirect_url,
'signers': signers,
'custom_fields': custom_fields,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'use_text_tags': use_text_tags,
'hide_text_tags': hide_text_tags,
'skip_me_now': skip_me_now,
'signing_options': signing_options,
'allow_reassign': allow_reassign,
'allow_decline': allow_decline,
'allow_ccs': allow_ccs,
'attachments': attachments
}
return self._create_unclaimed_draft(**params)
def create_embedded_unclaimed_draft_with_template(self, test_mode=False,
client_id=None, is_for_embedded_signing=False, template_id=None,
template_ids=None, requester_email_address=None, title=None,
subject=None, message=None, signers=None, ccs=None, signing_redirect_url=None,
requesting_redirect_url=None, metadata=None, custom_fields=None,
files=None, file_urls=None, skip_me_now=False, allow_decline=False,
allow_reassign=False, signing_options=None):
''' Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request created from this draft will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this draft. Visit our embedded page to learn more about this parameter.
template_id (str): The id of the Template to use when creating the Unclaimed Draft. Mutually exclusive with template_ids.
template_ids (list of str): The ids of the Templates to use when creating the Unclaimed Draft. Mutually exclusive with template_id.
requester_email_address (str): The email address of the user that should be designated as the requester of this draft, if the draft type is "request_signature."
title (str, optional): The title you want to assign to the Unclaimed Draft
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
ccs (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
is_for_embedded_signing (bool, optional): The request created from this draft will also be signable in embedded mode if set to True. The default is False.
metadata (dict, optional): Metadata to associate with the draft. Each request can include up to 10 metadata keys, with key names up to 40 characters long and values up to 500 characters long.
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template. An item of the list should look like this: `{'name': 'value'}`
files (list of str): The uploaded file(s) to append to the Signature Request.
file_urls (list of str): URLs of the file for HelloSign to download to append to the Signature Request. Use either `files` or `file_urls`
skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to False.
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
"client_id": client_id,
"requester_email_address": requester_email_address
}, [{
"template_id": template_id,
"template_ids": template_ids
}]
)
params = {
'test_mode': test_mode,
'client_id': client_id,
'is_for_embedded_signing': is_for_embedded_signing,
'template_id': template_id,
'template_ids': template_ids,
'title': title,
'subject': subject,
'message': message,
'requester_email_address': requester_email_address,
'signing_redirect_url': signing_redirect_url,
'requesting_redirect_url': requesting_redirect_url,
'signers': signers,
'ccs': ccs,
'metadata': metadata,
'custom_fields': custom_fields,
'files': files,
'file_urls': file_urls,
'skip_me_now': skip_me_now,
'allow_decline': allow_decline,
'allow_reassign': allow_reassign,
'signing_options': signing_options
}
return self._create_embedded_unclaimed_draft_with_template(**params)
@api_resource(UnclaimedDraft)
def unclaimed_draft_edit_and_resend(self, signature_request_id, client_id,
test_mode=False, requesting_redirect_url=None, signing_redirect_url=None,
is_for_embedded_signing=False, requester_email_address=None):
''' Creates a new signature request from an embedded request that can be edited prior to being sent.
Args:
signature_request_id (str): The id of the SignatureRequest to edit and resend
client_id (str): Client id of the app you're using to create this draft.
test_mode (bool, optional): Whether this is a test; if True, the signature request created from this
draft will not be legally binding. Defaults to False.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
is_for_embedded_signing (bool, optional): The request created from this draft will also be signable in
embedded mode if set to True. The default is False.
requester_email_address (str, optional): The email address of the user that should be designated as the
requester of this draft, if the draft type is "request_signature."
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
"client_id": client_id
}
)
data = {
'client_id': client_id,
'test_mode': self._boolean(test_mode),
'requesting_redirect_url': requesting_redirect_url,
'signing_redirect_url': signing_redirect_url,
'is_for_embedded_signing': self._boolean(is_for_embedded_signing),
'requester_email_address': requester_email_address
}
data = HSFormat.strip_none_values(data)
request = self._get_request()
return request.post(self.UNCLAIMED_DRAFT_EDIT_AND_RESEND_URL + signature_request_id, data=data)
# ---- OAUTH METHODS -------------------------------
def get_oauth_data(self, code, client_id, client_secret, state):
''' Get OAuth data from HelloSign
Args:
code (str): Code returned by HelloSign for our callback url
client_id (str): Client id of the associated app
client_secret (str): Secret token of the associated app
state (str): The state parameter that was provided to the OAuth authorization url, echoed back for verification
Returns:
An HSAccessTokenAuth object
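Example:
    An illustrative exchange of an authorization code for a token on an
    authenticated client (all values are placeholders):
        token_auth = client.get_oauth_data(
            code='AUTH_CODE',
            client_id='YOUR_APP_CLIENT_ID',
            client_secret='YOUR_APP_CLIENT_SECRET',
            state='STATE_FROM_CALLBACK')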
'''
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {
"state": state,
"code": code,
"grant_type": "authorization_code",
"client_id": client_id,
"client_secret": client_secret
})
return HSAccessTokenAuth.from_response(response)
def refresh_access_token(self, refresh_token):
''' Refreshes the current access token.
Gets a new access token, updates client auth and returns it.
Args:
refresh_token (str): Refresh token to use
Returns:
The new access token
'''
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {
"grant_type": "refresh_token",
"refresh_token": refresh_token
})
self.auth = HSAccessTokenAuth.from_response(response)
return self.auth.access_token
# ---- HELPERS -------------------------------------
def get_last_warnings(self):
''' Return the warnings associated with the last request '''
if self.request:
return self.request.get_warnings()
def _boolean(self, v):
''' Convert a truthy value to the '1'/'0' string format the API expects '''
return '1' if (v in (True, 'true', 'True', '1', 1)) else '0'
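# e.g. _boolean(True) -> '1', _boolean('false') -> '0'; any value outside
# the truthy set above maps to '0'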
def _get_request(self, auth=None):
''' Return an http request object
auth: Auth data to use
Returns:
An HSRequest object
'''
self.request = HSRequest(auth or self.auth, self.env)
self.request.response_callback = self.response_callback
return self.request
def _authenticate(self, email_address=None, password=None, api_key=None,
access_token=None, access_token_type=None):
''' Create authentication object to send requests
Args:
email_address (str): Email address of the account to make the requests
password (str): Password of the account used with email address
api_key (str): API Key. You can find your API key in https://app.hellosign.com/home/myAccount/current_tab/integrations#api
access_token (str): OAuth access token
access_token_type (str): Type of OAuth access token
Raises:
NoAuthMethod: If no authentication information found
Returns:
A HTTPBasicAuth or HSAccessTokenAuth object
'''
if access_token_type and access_token:
return HSAccessTokenAuth(access_token, access_token_type)
elif api_key:
return HTTPBasicAuth(api_key, '')
elif email_address and password:
return HTTPBasicAuth(email_address, password)
else:
raise NoAuthMethod("No authentication information found!")
def _check_required_fields(self, fields=None, either_fields=None):
''' Check the values of the fields
If no value found in `fields`, an exception will be raised.
`either_fields` is a list of dicts; for each dict, at least one of its fields must have a value.
Raises:
HSException: If any item of `fields` has no value, or if, for some
dict in `either_fields`, none of its fields has a value
Returns:
None
'''
for (key, value) in fields.items():
# If value is a dict, one of the fields in the dict is required ->
# exception if all are None
if not value:
raise HSException("Field '%s' is required." % key)
if either_fields is not None:
for field in either_fields:
if not any(field.values()):
raise HSException("One of the following fields is required: %s" % ", ".join(field.keys()))
@api_resource(SignatureRequest)
def _send_signature_request(self, test_mode=False, client_id=None, files=None,
file_urls=None, title=None, subject=None, message=None,
signing_redirect_url=None, signers=None, custom_fields=None,
cc_email_addresses=None, form_fields_per_document=None, use_text_tags=False,
hide_text_tags=False, metadata=None, allow_decline=False, allow_reassign=False,
signing_options=None, is_for_embedded_signing=False, attachments=None):
''' Shared logic between the send_signature_request and
send_signature_request_embedded methods
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this embedded signature request. Visit the embedded page to learn more about this parameter (https://www.hellosign.com/api/embeddedSigningWalkthrough)
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
title (str, optional): The title you want to assign to the SignatureRequest
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template
cc_email_addresses (list, optional): A list of email addresses that should be CCed
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
metadata (dict, optional): Metadata to associate with the signature request
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
is_for_embedded_signing (bool): send_signature_request and send_signature_request_embedded share the same sending logic. To differentiate the two calls, embedded requests are flagged with this parameter.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments; see the signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
A SignatureRequest object
'''
# Files
files_payload = HSFormat.format_file_params(files)
# File URLs
file_urls_payload = HSFormat.format_file_url_params(file_urls)
# Signers
signers_payload = HSFormat.format_dict_list(signers, 'signers')
# Custom fields
custom_fields_payload = HSFormat.format_custom_fields(custom_fields)
# Form fields per document
if isinstance(form_fields_per_document, str):
form_fields_payload = form_fields_per_document
else:
form_fields_payload = HSFormat.format_json_data(form_fields_per_document)
# CCs
cc_email_addresses_payload = HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses')
# Metadata
metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
# Signing options
signing_options_payload = HSFormat.format_signing_options(signing_options, 'signing_options')
# Attachments
attachments_payload = HSFormat.format_dict_list(attachments, 'attachments')
payload = {
"test_mode": self._boolean(test_mode),
"client_id": client_id,
"title": title,
"subject": subject,
"message": message,
"signing_redirect_url": signing_redirect_url,
"form_fields_per_document": form_fields_payload,
"use_text_tags": self._boolean(use_text_tags),
"hide_text_tags": self._boolean(hide_text_tags),
"allow_decline": self._boolean(allow_decline),
"allow_reassign": self._boolean(allow_reassign),
"signing_options": HSFormat.format_json_data(signing_options)
}
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
url = self.SIGNATURE_REQUEST_CREATE_URL
if is_for_embedded_signing:
url = self.SIGNATURE_REQUEST_CREATE_EMBEDDED_URL
data = {}
data.update(payload)
data.update(signers_payload)
data.update(custom_fields_payload)
data.update(cc_email_addresses_payload)
data.update(file_urls_payload)
data.update(metadata_payload)
data.update(signing_options_payload)
data.update(attachments_payload)
request = self._get_request()
response = request.post(url, data=data, files=files_payload)
return response
@api_resource(SignatureRequest)
def _send_signature_request_with_template(self, test_mode=False, client_id=None,
template_id=None, template_ids=None, title=None, subject=None, message=None,
signing_redirect_url=None, signers=None, ccs=None, custom_fields=None,
metadata=None, allow_decline=False, files=None, file_urls=None, signing_options=None):
''' Shared logic between the send_signature_request_with_template
and send_signature_request_embedded_with_template methods
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request will not be legally binding. Defaults to False.
client_id (str): Client id of the app you're using to create this embedded signature request. Visit the embedded page to learn more about this parameter (https://app.hellosign.com/api/embeddedSigningWalkthrough)
template_id (str): The id of the Template to use when creating the SignatureRequest. Mutually exclusive with template_ids.
template_ids (list): The ids of the Templates to use when creating the SignatureRequest. Mutually exclusive with template_id.
title (str, optional): The title you want to assign to the SignatureRequest
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
signers (list of dict): A list of signers, which each has the following attributes:
role_name (str): Role the signer is assigned to
name (str): The name of the signer
email_address (str): Email address of the signer
pin (str, optional): The 4- to 12-character access code that will secure this signer's signature page
ccs (list of dict, optional): The email address of the CC filling the role of RoleName. Required when a CC role exists for the Template. Each dict has the following attributes:
role_name (str): CC role name
email_address (str): CC email address
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists in the Template. An item of the list should look like this: `{'name': 'value'}`
metadata (dict, optional): Metadata to associate with the signature request
allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
files (list of str): The uploaded file(s) to append to the Signature Request.
file_urls (list of str): URLs of the file for HelloSign to download to append to the Signature Request. Use either `files` or `file_urls`
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
Returns:
A SignatureRequest object
'''
# Signers
signers_payload = HSFormat.format_dict_list(signers, 'signers', 'role_name')
# CCs
ccs_payload = HSFormat.format_dict_list(ccs, 'ccs', 'role_name')
# Custom fields
custom_fields_payload = HSFormat.format_custom_fields(custom_fields)
# Metadata
metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
# Signing options
signing_options_payload = HSFormat.format_signing_options(signing_options, 'signing_options')
# Template ids
template_ids_payload = {}
if template_ids:
for i, tpl_id in enumerate(template_ids):
template_ids_payload["template_ids[%s]" % i] = tpl_id
# Files
files_payload = HSFormat.format_file_params(files)
# File URLs
file_urls_payload = HSFormat.format_file_url_params(file_urls)
payload = {
"test_mode": self._boolean(test_mode),
"client_id": client_id,
"template_id": template_id,
"title": title,
"subject": subject,
"message": message,
"signing_redirect_url": signing_redirect_url,
"allow_decline": self._boolean(allow_decline),
"signing_options": HSFormat.format_json_data(signing_options)
}
# remove attributes with empty value
payload = HSFormat.strip_none_values(payload)
url = self.SIGNATURE_REQUEST_CREATE_WITH_TEMPLATE_URL
if client_id:
url = self.SIGNATURE_REQUEST_CREATE_EMBEDDED_WITH_TEMPLATE_URL
data = payload.copy()
data.update(signers_payload)
data.update(ccs_payload)
data.update(custom_fields_payload)
data.update(metadata_payload)
data.update(signing_options_payload)
data.update(template_ids_payload)
data.update(file_urls_payload)
request = self._get_request()
response = request.post(url, data=data, files=files_payload)
return response
@api_resource(UnclaimedDraft)
def _create_unclaimed_draft(self, test_mode=False, client_id=None,
is_for_embedded_signing=False, requester_email_address=None, files=None,
file_urls=None, draft_type=None, subject=None, message=None, signers=None,
custom_fields=None, cc_email_addresses=None, signing_redirect_url=None,
requesting_redirect_url=None, form_fields_per_document=None, metadata=None,
use_preexisting_fields=False, use_text_tags=False, hide_text_tags=False,
skip_me_now=False, allow_reassign=False, allow_decline=False,
signing_options=None, allow_ccs=False, attachments=None):
''' Creates a new Draft that can be claimed using the claim URL
Args:
test_mode (bool, optional): Whether this is a test; if True, the signature request created from this draft will not be legally binding. Defaults to False.
client_id (str): Client id of the app used to create the embedded draft.
is_for_embedded_signing (bool): Whether this is for embedded signing on top of being for embedded requesting.
requester_email_address (str): Email address of the requester when creating a draft for embedded requesting.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
custom_fields (list of dict, optional): A list of custom fields. Required when a CustomField exists using text tags or form_fields_per_document. An item of the list should look like this: `{'name': 'value'}`
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
form_fields_per_document (str or list of dict, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest).
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
use_text_tags (bool, optional): Use text tags in the provided file(s) to create form fields
hide_text_tags (bool, optional): Hide text tag areas
            skip_me_now (bool, optional): Disables the "Me (Now)" option for the document's preparer. Defaults to False.
            allow_reassign (bool, optional): Allows signers to reassign their signature requests to other signers if set to True. Defaults to False.
            allow_decline (bool, optional): Allows signers to decline to sign a document if set to True. Defaults to False.
signing_options (dict, optional): Allows the requester to specify the types allowed for creating a signature. Defaults to account settings.
allow_ccs (bool, optional): Specifies whether the user is allowed to provide email addresses to CC when sending the request. Defaults to False.
attachments (list of dict): A list of attachments, each with the following attributes:
name (str): The name of the attachment
instructions (str): The instructions for uploading the attachment
signer_index (int): The index of the signer who needs to upload the attachments, see signers parameter for more details
required (bool, optional): Determines if the attachment must be uploaded
Returns:
An UnclaimedDraft object
'''
# Files
files_payload = HSFormat.format_file_params(files)
# Files URLs
file_urls_payload = HSFormat.format_file_url_params(file_urls)
# Signers
signers_payload = {}
if signers:
for (idx, signer) in enumerate(signers):
if draft_type == UnclaimedDraft.UNCLAIMED_DRAFT_REQUEST_SIGNATURE_TYPE:
if "name" not in signer and "email_address" not in signer:
raise HSException("Signer's name and email are required")
signers_payload = HSFormat.format_dict_list(signers, 'signers')
# CCs
cc_email_addresses_payload = HSFormat.format_param_list(cc_email_addresses, 'cc_email_addresses')
# Custom fields
custom_fields_payload = HSFormat.format_custom_fields(custom_fields)
# Form fields per document
if isinstance(form_fields_per_document, str):
form_fields_payload = form_fields_per_document
else:
form_fields_payload = HSFormat.format_json_data(form_fields_per_document)
# Metadata
metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
# Signing options
signing_options_payload = HSFormat.format_signing_options(signing_options, 'signing_options')
# Attachments
attachments_payload = HSFormat.format_dict_list(attachments, 'attachments')
payload = {
"test_mode": self._boolean(test_mode),
"type": draft_type,
"subject": subject,
"message": message,
"signing_redirect_url": signing_redirect_url,
"form_fields_per_document": form_fields_payload,
"use_preexisting_fields": self._boolean(use_preexisting_fields),
"use_text_tags": self._boolean(use_text_tags),
"hide_text_tags": self._boolean(hide_text_tags),
"skip_me_now": self._boolean(skip_me_now),
"allow_reassign": self._boolean(allow_reassign),
"allow_decline": self._boolean(allow_decline),
"signing_options": HSFormat.format_json_data(signing_options),
"allow_ccs": self._boolean(allow_ccs)
}
url = self.UNCLAIMED_DRAFT_CREATE_URL
if client_id is not None:
payload.update({
'client_id': client_id,
'is_for_embedded_signing': '1' if is_for_embedded_signing else '0',
'requester_email_address': requester_email_address,
'requesting_redirect_url': requesting_redirect_url
})
url = self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_URL
# remove attributes with none value
payload = HSFormat.strip_none_values(payload)
data = payload.copy()
data.update(signers_payload)
data.update(custom_fields_payload)
data.update(cc_email_addresses_payload)
data.update(file_urls_payload)
data.update(metadata_payload)
data.update(signing_options_payload)
data.update(attachments_payload)
request = self._get_request()
response = request.post(url, data=data, files=files_payload)
return response
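    # Illustrative usage sketch (not part of the SDK source; the `client` name,
    # API key, and file path below are hypothetical, and callers would normally
    # go through the public create_unclaimed_draft wrapper rather than this
    # private helper):
    #
    #   client = HSClient(api_key='YOUR_API_KEY')
    #   draft = client._create_unclaimed_draft(
    #       test_mode=True,
    #       draft_type='request_signature',
    #       files=['nda.pdf'],
    #       signers=[{'name': 'Alice', 'email_address': 'alice@example.com'}])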
@api_resource(Template)
def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None):
''' Add or Remove user from a Template
We use this function for two tasks because they have the same API call
Args:
template_id (str): The id of the template
account_id (str): ID of the account to add/remove access to/from
email_address (str): The email_address of the account to add/remove access to/from
Raises:
HSException: If no email address or account_id specified
Returns:
A Template object
'''
if not email_address and not account_id:
raise HSException("No email address or account_id specified")
data = {}
if account_id is not None:
data = {
"account_id": account_id
}
else:
data = {
"email_address": email_address
}
request = self._get_request()
response = request.post(url + template_id, data)
return response
@api_resource(Team)
def _add_remove_team_member(self, url, email_address=None, account_id=None):
''' Add or Remove a team member
We use this function for two different tasks because they have the same
API call
Args:
email_address (str): Email address of the Account to add/remove
account_id (str): ID of the Account to add/remove
Returns:
A Team object
'''
if not email_address and not account_id:
raise HSException("No email address or account_id specified")
data = {}
if account_id is not None:
data = {
"account_id": account_id
}
else:
data = {
"email_address": email_address
}
request = self._get_request()
response = request.post(url, data)
return response
@api_resource(Template)
def _create_embedded_template_draft(self, client_id, signer_roles, test_mode=False,
files=None, file_urls=None, title=None, subject=None, message=None,
cc_roles=None, merge_fields=None, skip_me_now=False,
use_preexisting_fields=False, metadata=None, allow_reassign=False, allow_ccs=False, attachments=None):
''' Helper method for creating embedded template drafts.
See public function for params.
'''
url = self.TEMPLATE_CREATE_EMBEDDED_DRAFT_URL
payload = {
'test_mode': self._boolean(test_mode),
'client_id': client_id,
'title': title,
'subject': subject,
'message': message,
'skip_me_now': self._boolean(skip_me_now),
'use_preexisting_fields': self._boolean(use_preexisting_fields),
'allow_reassign': self._boolean(allow_reassign),
            'allow_ccs': self._boolean(allow_ccs)
}
# Prep files
files_payload = HSFormat.format_file_params(files)
file_urls_payload = HSFormat.format_file_url_params(file_urls)
# Prep Signer Roles
signer_roles_payload = HSFormat.format_dict_list(signer_roles, 'signer_roles')
# Prep CCs
ccs_payload = HSFormat.format_param_list(cc_roles, 'cc_roles')
# Prep Merge Fields
merge_fields_payload = {
'merge_fields': json.dumps(merge_fields)
}
# Prep Metadata
metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
# Attachments
attachments_payload = HSFormat.format_dict_list(attachments, 'attachments')
# Assemble data for sending
data = {}
data.update(payload)
data.update(file_urls_payload)
data.update(signer_roles_payload)
data.update(ccs_payload)
data.update(metadata_payload)
data.update(attachments_payload)
        if merge_fields is not None:
data.update(merge_fields_payload)
data = HSFormat.strip_none_values(data)
request = self._get_request()
response = request.post(url, data=data, files=files_payload)
return response
@api_resource(UnclaimedDraft)
def _create_embedded_unclaimed_draft_with_template(self, test_mode=False,
client_id=None, is_for_embedded_signing=False, template_id=None,
template_ids=None, requester_email_address=None, title=None,
subject=None, message=None, signers=None, ccs=None,
signing_redirect_url=None, requesting_redirect_url=None, metadata=None,
custom_fields=None, files=None, file_urls=None, skip_me_now=False,
allow_decline=False, allow_reassign=False, signing_options=None):
''' Helper method for creating unclaimed drafts from templates
See public function for params.
'''
        # single params
payload = {
"test_mode": self._boolean(test_mode),
"client_id": client_id,
"is_for_embedded_signing": self._boolean(is_for_embedded_signing),
"template_id": template_id,
"requester_email_address": requester_email_address,
"title": title,
"subject": subject,
"message": message,
"signing_redirect_url": signing_redirect_url,
"requesting_redirect_url": requesting_redirect_url,
"skip_me_now": self._boolean(skip_me_now),
"allow_decline": self._boolean(allow_decline),
"allow_reassign": self._boolean(allow_reassign),
"signing_options": HSFormat.format_json_data(signing_options)
}
        # format multi params
template_ids_payload = HSFormat.format_param_list(template_ids, 'template_ids')
signers_payload = HSFormat.format_dict_list(signers, 'signers', 'role_name')
ccs_payload = HSFormat.format_dict_list(ccs, 'ccs', 'role_name')
metadata_payload = HSFormat.format_single_dict(metadata, 'metadata')
signing_options_payload = HSFormat.format_signing_options(signing_options, 'signing_options')
custom_fields_payload = HSFormat.format_custom_fields(custom_fields)
# Files
files_payload = HSFormat.format_file_params(files)
# File URLs
file_urls_payload = HSFormat.format_file_url_params(file_urls)
        # assemble payload
data = {}
data.update(payload)
data.update(template_ids_payload)
data.update(signers_payload)
data.update(ccs_payload)
data.update(metadata_payload)
data.update(signing_options_payload)
data.update(custom_fields_payload)
data.update(file_urls_payload)
data = HSFormat.strip_none_values(data)
        # send call
url = self.UNCLAIMED_DRAFT_CREATE_EMBEDDED_WITH_TEMPLATE_URL
request = self._get_request()
response = request.post(url, data=data, files=files_payload)
return response
|
HelloFax/hellosign-python-sdk
|
hellosign_sdk/hsclient.py
|
Python
|
mit
| 106,053
|
[
"VisIt"
] |
605075d73e0d94ebec2cb36aaaadd667da62b386a1ffc6fbdf12e5683b863136
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffy(RPackage):
"""The package contains functions for exploratory oligonucleotide array
    analysis. The dependence on tkWidgets only concerns a few convenience
functions. 'affy' is fully functional without it."""
homepage = "https://bioconductor.org/packages/affy/"
url = "https://git.bioconductor.org/packages/affy"
list_url = homepage
version('1.54.0', git='https://git.bioconductor.org/packages/affy', commit='a815f02906fcf491b28ed0a356d6fce95a6bd20e')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affyio', type=('build', 'run'))
depends_on('r-biocinstaller', type=('build', 'run'))
depends_on('r-preprocesscore', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.54.0')
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-affy/package.py
|
Python
|
lgpl-2.1
| 2,130
|
[
"Bioconductor"
] |
d27429d004ee61f27683cc5b9fbda9d1f135eefa7fae05180e4616ec68e01d6f
|
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Training AdaGAN on various datasets.
Refer to the arXiv paper 'AdaGAN: Boosting Generative Models'
Coded by Ilya Tolstikhin, Carl-Johann Simon-Gabriel
"""
import os
import argparse
import logging
import tensorflow as tf
import numpy as np
from datahandler import DataHandler
from adagan import AdaGan
from metrics import Metrics
import utils
flags = tf.app.flags
flags.DEFINE_float("g_learning_rate", 0.0001,
"Learning rate for Generator optimizers [16e-4]")
flags.DEFINE_float("d_learning_rate", 0.00005,
"Learning rate for Discriminator optimizers [4e-4]")
flags.DEFINE_float("learning_rate", 0.003,
"Learning rate for other optimizers [8e-4]")
flags.DEFINE_float("adam_beta1", 0.5, "Beta1 parameter for Adam optimizer [0.5]")
flags.DEFINE_integer("zdim", 64, "Dimensionality of the latent space [100]")
flags.DEFINE_float("init_std", 0.0099999, "Initial variance for weights [0.02]")
flags.DEFINE_string("workdir", 'results_celeba_pot', "Working directory ['results']")
flags.DEFINE_bool("unrolled", False, "Use unrolled GAN training [True]")
flags.DEFINE_bool("vae", False, "Use VAE instead of GAN")
flags.DEFINE_bool("pot", True, "Use POT instead of GAN")
flags.DEFINE_float("pot_lambda", 10., "POT regularization")
flags.DEFINE_bool("is_bagging", False, "Do we want to use bagging instead of adagan? [False]")
FLAGS = flags.FLAGS
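# A possible command line for this script (flag values are illustrative,
# not prescribed by the AdaGAN paper):
#
#   python iclr_celeba_began.py --zdim 64 --pot_lambda 10. --workdir results_celeba_pot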
def main():
opts = {}
# Utility
opts['random_seed'] = 66
opts['dataset'] = 'celebA' # gmm, circle_gmm, mnist, mnist3 ...
opts['celebA_crop'] = 'closecrop' # closecrop or resizecrop
opts['data_dir'] = 'celebA/datasets/celeba/img_align_celeba'
opts['trained_model_path'] = None #'models'
opts['mnist_trained_model_file'] = None #'mnist_trainSteps_19999_yhat' # 'mnist_trainSteps_20000'
opts['work_dir'] = FLAGS.workdir
opts['ckpt_dir'] = 'checkpoints'
opts["verbose"] = 2
opts['tf_run_batch_size'] = 128
opts["early_stop"] = -1 # set -1 to run normally
opts["plot_every"] = 500
opts["save_every_epoch"] = 20
opts['gmm_max_val'] = 15.
# Datasets
opts['toy_dataset_size'] = 10000
opts['toy_dataset_dim'] = 2
opts['mnist3_dataset_size'] = 2 * 64 # 64 * 2500
opts['mnist3_to_channels'] = False # Hide 3 digits of MNIST to channels
opts['input_normalize_sym'] = True # Normalize data to [-1, 1]
opts['gmm_modes_num'] = 5
# AdaGAN parameters
opts['adagan_steps_total'] = 1
opts['samples_per_component'] = 1000
opts['is_bagging'] = FLAGS.is_bagging
opts['beta_heur'] = 'uniform' # uniform, constant
opts['weights_heur'] = 'theory_star' # theory_star, theory_dagger, topk
opts['beta_constant'] = 0.5
opts['topk_constant'] = 0.5
opts["mixture_c_epoch_num"] = 5
opts["eval_points_num"] = 25600
opts['digit_classification_threshold'] = 0.999
opts['inverse_metric'] = False # Use metric from the Unrolled GAN paper?
opts['inverse_num'] = 100 # Number of real points to inverse.
opts['objective'] = None
# Generative model parameters
opts["init_std"] = FLAGS.init_std
opts["init_bias"] = 0.0
opts['latent_space_distr'] = 'normal' # uniform, normal
opts['latent_space_dim'] = FLAGS.zdim
opts["gan_epoch_num"] = 300
opts['convolutions'] = True # If False then encoder is MLP of 3 layers
opts['d_num_filters'] = 1024
opts['d_num_layers'] = 4
opts['g_num_filters'] = 256
opts['g_num_layers'] = 12
opts['e_is_random'] = False
opts['e_pretrain'] = True
opts['e_add_noise'] = True
opts['e_pretrain_bsize'] = 256
opts['e_num_filters'] = 256
opts['e_num_layers'] = 12
opts['g_arch'] = 'began'
opts['g_stride1_deconv'] = False
opts['g_3x3_conv'] = 0
opts['e_arch'] = 'began'
opts['e_3x3_conv'] = 0
opts['conv_filters_dim'] = 3
# --GAN specific:
opts['conditional'] = False
opts['unrolled'] = FLAGS.unrolled # Use Unrolled GAN? (only for images)
opts['unrolling_steps'] = 5 # Used only if unrolled = True
# --VAE specific
opts['vae'] = FLAGS.vae
opts['vae_sigma'] = 0.01
# --POT specific
opts['pot'] = FLAGS.pot
opts['pot_pz_std'] = 2.
opts['pot_lambda'] = FLAGS.pot_lambda
opts['adv_c_loss'] = 'none'
opts['vgg_layer'] = 'pool2'
opts['adv_c_patches_size'] = 5
opts['adv_c_num_units'] = 32
opts['adv_c_loss_w'] = 1.0
opts['cross_p_w'] = 0.0
opts['diag_p_w'] = 0.0
opts['emb_c_loss_w'] = 1.0
opts['reconstr_w'] = 1.0
opts['z_test'] = 'mmd'
opts['gan_p_trick'] = False
opts['pz_transform'] = False
opts['z_test_corr_w'] = 0.0
opts['z_test_proj_dim'] = 10
# Optimizer parameters
opts['optimizer'] = 'adam' # sgd, adam
opts["batch_size"] = 64
opts["d_steps"] = 1
opts['d_new_minibatch'] = False
opts["g_steps"] = 2
opts['batch_norm'] = True
opts['dropout'] = False
opts['dropout_keep_prob'] = 0.5
opts['recon_loss'] = 'l2sq'
# "manual" or number (float or int) giving the number of epochs to divide
# the learning rate by 10 (converted into an exp decay per epoch).
opts['decay_schedule'] = 'plateau'
opts['opt_learning_rate'] = FLAGS.learning_rate
opts['opt_d_learning_rate'] = FLAGS.d_learning_rate
opts['opt_g_learning_rate'] = FLAGS.g_learning_rate
opts["opt_beta1"] = FLAGS.adam_beta1
opts['batch_norm_eps'] = 1e-05
opts['batch_norm_decay'] = 0.9
if opts['e_is_random']:
assert opts['latent_space_distr'] == 'normal',\
'Random encoders currently work only with Gaussian Pz'
# Data augmentation
opts['data_augm'] = False
    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
utils.create_dir(opts['work_dir'])
utils.create_dir(os.path.join(opts['work_dir'], opts['ckpt_dir']))
with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
text.write('Parameters:\n')
for key in opts:
text.write('%s : %s\n' % (key, opts[key]))
data = DataHandler(opts)
assert data.num_points >= opts['batch_size'], 'Training set too small'
adagan = AdaGan(opts, data)
metrics = Metrics()
train_size = data.num_points
random_idx = np.random.choice(train_size, 4*40, replace=False)
metrics.make_plots(opts, 0, data.data,
data.data[random_idx], adagan._data_weights, prefix='dataset_')
for step in range(opts["adagan_steps_total"]):
logging.info('Running step {} of AdaGAN'.format(step + 1))
adagan.make_step(opts, data)
num_fake = opts['eval_points_num']
logging.debug('Sampling fake points')
fake_points = adagan.sample_mixture(num_fake)
logging.debug('Sampling more fake points')
more_fake_points = adagan.sample_mixture(500)
logging.debug('Plotting results')
if opts['dataset'] == 'gmm':
metrics.make_plots(opts, step, data.data[:500],
fake_points[0:100], adagan._data_weights[:500])
logging.debug('Evaluating results')
(likelihood, C) = metrics.evaluate(
opts, step, data.data[:500],
fake_points, more_fake_points, prefix='')
else:
metrics.make_plots(opts, step, data.data,
fake_points[:320], adagan._data_weights)
if opts['inverse_metric']:
logging.debug('Evaluating results')
l2 = np.min(adagan._invert_losses[:step + 1], axis=0)
logging.debug('MSE=%.5f, STD=%.5f' % (np.mean(l2), np.std(l2)))
res = metrics.evaluate(
opts, step, data.data[:500],
fake_points, more_fake_points, prefix='')
logging.debug("AdaGan finished working!")
if __name__ == '__main__':
main()
|
tolstikhin/adagan
|
iclr_celeba_began.py
|
Python
|
bsd-3-clause
| 8,105
|
[
"Gaussian"
] |
d21b4c696e31ff48bcfc46aea6e75298aff5122cbeb9b10c12bda051dd67d46a
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkadd.py,v $
## Language: Python
## Date: $Date: 2016/07/19 09:49:59 $
## Version: $Revision: 1.6 $
## Copyright (c) Jingfeng Jiang, Yu Wang. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import vtkvmtk
import sys
import pypes
vmtkadd = 'vmtkimagedatawrite'
class vmtkimagedatawrite(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.dSurface = None
self.SetScriptName('vmtkimagedatawrite')
        self.SetScriptDoc('convert unstructuredgrid to imagedata through vtkprobefilter')
self.SetInputMembers([
['Surface','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader']
])
## self.SetOutputMembers([
## ['dSurface','d','vtkUnstructuredGrid',1,'','the output mesh','vmtkmeshwriter']
## ])
def Execute(self):
        if self.Surface is None:
            self.PrintError('Error: No Surface.')
        self.PrintLog('Computing conversion')
convertion = vtkvmtk.vtkvmtkadd()
convertion.SetInput(self.Surface)
convertion.Setdisplacementdataname('displacementz')
convertion.Update()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
|
jjiang-mtu/virtual-breast-project
|
quasi-static_UE/add_displacement/vmtkadd.py
|
Python
|
gpl-2.0
| 1,636
|
[
"VTK"
] |
1a91e18cb5a5ec7063dc3c48c55bbb3c79690e2f0d35d4e77d3ac511e760fa1b
|
"""Repository checks
These checks verify consistency with the repository (blacklisting,
other channels, existing versions).
"""
from .. import utils
from . import LintCheck, ERROR, WARNING, INFO
class in_other_channels(LintCheck):
"""A package of the same name already exists in another channel
Bioconda and Conda-Forge occupy the same name space and have
agreed not to add packages if a package of the same name is
already present in the respective other channel.
    If this is a new package, please choose a different name
(e.g. append ``-bio``).
If you are updating a package, please continue in the package's
new home at conda-forge.
"""
def check_recipe(self, recipe):
channels = utils.RepoData().get_package_data(key="channel", name=recipe.name)
if set(channels) - set(('bioconda',)):
self.message(section='package/name')
class build_number_needs_bump(LintCheck):
"""The recipe build number should be incremented
A package with the same name and version and a build number at
least as high as specified in the recipe already exists in the
channel. Please increase the build number.
"""
def check_recipe(self, recipe):
bldnos = utils.RepoData().get_package_data(
key="build_number",
name=recipe.name, version=recipe.version)
if bldnos and recipe.build_number <= max(bldnos):
self.message('build/number', data=max(bldnos))
def fix(self, _message, data):
self.recipe.reset_buildnumber(data + 1)
return True
class build_number_needs_reset(LintCheck):
"""The recipe build number should be reset to 0
No previous build of a package of this name and this version exists,
the build number should therefore be 0.
"""
requires = ['missing_build_number']
def check_recipe(self, recipe):
bldnos = utils.RepoData().get_package_data(
key="build_number",
name=recipe.name, version=recipe.version)
if not bldnos and recipe.build_number > 0:
self.message('build/number', data=0)
def fix(self, _message, data):
self.recipe.reset_buildnumber(data)
return True
class recipe_is_blacklisted(LintCheck):
"""The recipe is currently blacklisted and will not be built.
If you are intending to repair this recipe, remove it from
the build fail blacklist.
"""
def __init__(self, linter):
super().__init__(linter)
self.blacklist = linter.get_blacklist()
self.blacklists = linter.config.get('blacklists')
def check_recipe(self, recipe):
if recipe.name in self.blacklist:
self.message(section='package/name', data=True)
def fix(self, _message, _data):
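        # Walk every configured blacklist file; the inner for/else falls
        # through to `continue` when the recipe name is not found in a file,
        # so `num` only ever refers to a line that actually matched.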
for blacklist in self.blacklists:
with open(blacklist, 'r') as fdes:
data = fdes.readlines()
for num, line in enumerate(data):
if self.recipe.name in line:
break
else:
continue
del data[num]
with open(blacklist, 'w') as fdes:
fdes.write(''.join(data))
break
else:
return False
return True
|
bioconda/bioconda-utils
|
bioconda_utils/lint/check_repo.py
|
Python
|
mit
| 3,257
|
[
"Bioconda"
] |
f5e97d53d2ad8716c36b191e3125458a39695f52e9756a47b484cdc094c83e79
|
# Copyright 2001 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for SAF (Simple Alignment Format).
http://www.embl-heidelberg.de/predictprotein/Dexa/optin_safDes.html
"""
# standard library
import string
import array
import os
import re
import sgmllib
import urlparse
# XML from python 2.0
from xml.sax import handler
from xml.sax.handler import ContentHandler
# Martel
import Martel
from Martel import RecordReader
from Martel import Dispatch
from Bio.ParserSupport import EventGenerator
from Bio.ParserSupport import AbstractConsumer
from Bio import File
from Bio.Align.Generic import Alignment
import Bio.Alphabet
import saf_format
import Record
class Iterator:
"""Iterator interface to move over a file of Saf entries one at a time.
"""
def __init__(self, handle, parser = None):
"""Initialize the iterator.
Arguments:
o handle - A handle with Saf entries to iterate through.
o parser - An optional parser to pass the entries through before
returning them. If None, then the raw entry will be returned.
"""
self.handle = File.UndoHandle( handle )
self._reader = RecordReader.Everything( self.handle )
self._parser = parser
def next(self):
"""Return the next Saf record from the handle.
Will return None if we ran out of records.
"""
data = self._reader.next()
if self._parser is not None:
if data:
dumpfile = open( 'dump', 'w' )
dumpfile.write( data )
dumpfile.close()
return self._parser.parse(File.StringHandle(data))
return data
def __iter__(self):
return iter(self.next, None)
class _Scanner:
"""Start up Martel to do the scanning of the file.
This initialzes the Martel based parser and connects it to a handler
that will generate events for a Feature Consumer.
"""
def __init__(self, debug = 0):
"""Initialize the scanner by setting up our caches.
Creating the parser takes a long time, so we want to cache it
to reduce parsing time.
Arguments:
o debug - The level of debugging that the parser should
display. Level 0 is no debugging, Level 2 displays the most
debugging info (but is much slower). See Martel documentation
for more info on this.
"""
# a listing of all tags we are interested in scanning for
# in the MartelParser
self.interest_tags = [ 'candidate_line', 'saf_record' ]
# make a parser that returns only the tags we are interested in
expression = Martel.select_names( saf_format.saf_record, self.interest_tags)
self._parser = expression.make_parser(debug_level = debug)
def feed(self, handle, consumer):
"""Feed a set of data into the scanner.
Arguments:
o handle - A handle with the information to parse.
o consumer - The consumer that should be informed of events.
"""
consumer.set_interest_tags( self.interest_tags )
self._parser.setContentHandler( consumer )
# self._parser.setErrorHandler(handle.ErrorHandler())
self._parser.parseFile(handle)
class _RecordConsumer( Dispatch.Dispatcher ):
"""Create a Saf Record object from scanner generated information.
"""
def __init__(self ):
Dispatch.Dispatcher.__init__( self )
self.data = Record.Record()
self._refresh()
def _refresh( self ):
self._sequences = {}
self._names = {}
self._history = []
self._guide = ''
self._ref_length = 0
self._ordinal = 0
def set_interest_tags( self, interest_tags ):
self.interest_tags = interest_tags
def startDocument(self):
self.data = Record.Record()
self._refresh()
def start_candidate_line(self, name, attrs):
self.save_characters()
def end_candidate_line(self, candidate_lines ):
candidate_line = self.get_characters()
name = candidate_line.split( ' ' )[ 0 ]
sequence = candidate_line[ len( name ): ]
name = name.strip()
sequence = sequence.replace( " ", "" )
if( self._guide == '' ):
self._guide = name
self._ref_length = len( sequence )
elif( name == self._guide ):
history = []
self._ref_length = len( sequence )
try:
self._history.index( name )
except ValueError:
self._names[ self._ordinal ] = name
self._ordinal = self._ordinal + 1
self._history.append( name )
sequence = sequence.strip()
try:
sequence = self._sequences[ name ] + sequence
except KeyError:
pass
self._sequences[ name ] = sequence
def start_saf_record( self, sequence, attrs ):
self._sequences = {}
def end_saf_record( self, saf_record ):
ordinals = self._names.keys()
ordinals.sort()
for ordinal in ordinals:
name = self._names[ ordinal ]
sequence = self._sequences[ name ]
self.data.alignment.add_sequence( name, sequence )
self._refresh()
class RecordParser:
"""Parse Saf files into Record objects
"""
def __init__(self, debug_level = 0):
"""Initialize the parser.
Arguments:
o debug_level - An optional argument that specifies the amount of
debugging information Martel should spit out. By default we have
no debugging info (the fastest way to do things), but if you want
you can set this as high as two and see exactly where a parse fails.
"""
self._scanner = _Scanner(debug_level)
def parse(self, handle):
"""Parse the specified handle into a SAF record.
"""
self._consumer = _RecordConsumer()
self._scanner.feed(handle, self._consumer)
return self._consumer.data
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Saf/__init__.py
|
Python
|
apache-2.0
| 6,366
|
[
"Biopython"
] |
0359936d8b5e9ff5b1ac971d4e0fc6e655b367b78b3e5da54d0b885e27af5891
|
from six import with_metaclass
import abc
import datetime
import tempfile
import os
from contextlib import contextmanager
from .tides import Tides
import numpy
ALL_FES2014_TIDAL_CONSTITUENTS = ["2N2", "EPS2", "J1", "K1", "K2", "L2", "M2", "M3", "M4", "M6", "M8",
"MF", "MKS2", "MM", "MN4", "MS4", "MSF", "MSQM", "MTM", "MU2", "N2", "N4", "NU2",
"O1", "P1", "Q1", "R2", "S1", "S2", "S4", "SA", "SSA", "T2"]
class TidalInterpolator(with_metaclass(abc.ABCMeta)):
"""Abstract base class for tidal interpolators."""
def set_initial_time(self, datetime0):
"""Set datetime corresponding to t=0
        If a datetime without timezone info (tzinfo) is supplied, it is assumed to be UTC."""
if datetime0.tzinfo:
# only pull in this dependency when tzinfo is supplied
import pytz
self.datetime0 = pytz.utc.localize(datetime0)
else:
# with naive datetimes without tzinfo, the assumption is everything is in UTC
self.datetime0 = datetime0
@abc.abstractmethod
def set_time(self, t):
pass
@abc.abstractmethod
def get_val(self, x, **kwargs):
pass
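# Concrete interpolators implement set_time/get_val; a minimal subclass
# skeleton (illustrative only, not part of the library):
#
#   class ConstantInterpolator(TidalInterpolator):
#       def set_time(self, t):
#           self.t = t
#       def get_val(self, x, **kwargs):
#           return 0.0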
fes_ini_template = """TIDE_{constituent}_FILE = {fes_data_path}/{lower_case_constituent}.nc
TIDE_{constituent}_LATITUDE = lat
TIDE_{constituent}_LONGITUDE = lon
TIDE_{constituent}_AMPLITUDE = amplitude
TIDE_{constituent}_PHASE = phase
"""
@contextmanager
def temporary_fes_ini_file(tide, fes_data_path):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
for constituent in tide.constituents:
f.write(fes_ini_template.format(
constituent=constituent, fes_data_path=fes_data_path,
lower_case_constituent=constituent.lower()
))
f.close()
yield f
file_name = f.name
os.remove(file_name)
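# For a single constituent the template above renders to entries like the
# following (the data path is hypothetical):
#
#   TIDE_M2_FILE = /data/fes2014/ocean_tide/m2.nc
#   TIDE_M2_LATITUDE = lat
#   TIDE_M2_LONGITUDE = lon
#   TIDE_M2_AMPLITUDE = amplitude
#   TIDE_M2_PHASE = phase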
class FES2014TidalInterpolator(TidalInterpolator):
"""Tidal interpolator based on FES2014 global solution.
For any given time and lat, lon, interpolates amplitude and phases of
the harmonic constituents of the FES2014 global tide solution and reconstructs
the tidal elevation in that point.
Data can be downloaded from ftp://ftp.aviso.altimetry.fr/auxiliary/tide_model/fes2014_elevations_and_load/fes2014b_elevations/
Download either ocean_tide.tar.xz or ocean_tide_extrapolated.tar.xz (latter extrapolates amplitudes and phases inland
so that interpolation in coastal locations is less dependent on being exactly within the FES2014 grid) and extract
    the netcdf files. For ftp access you need prior registration as described here:
https://www.aviso.altimetry.fr/en/data/products/auxiliary-products/global-tide-fes.html.
To make use of this class you need to install the fes python package using:
pip install git+https://bitbucket.org/cnes_aviso/fes.git#subdirectory=python.
Constituents and initial (t=0) datetime are set via a Tides object. To use all constituents available
in the FES2014 solution:
import uptide
import datetime
tide = uptide.Tides(uptide.ALL_FES2014_TIDAL_CONSTITUENTS)
tide.set_initial_time(datetime.datetime(1975, 10, 24, 0, 0))
tnci = uptide.FES2014TidalInterpolator(tide, 'path_to_extracted_fes2014_solution/ocean_tide/')
Alternatively, you can also use the .ini files as provided in the fes source code (https://bitbucket.org/cnes_aviso/fes),
    and set the initial time separately:
tnci = uptide.FES2014TidalInterpolator('path_to/ocean_tide.ini')
tnci.set_initial_time(datetime.datetime(2005,3,1,0,0))
The tidal elevation at time t (in seconds) is obtained via:
tnci.set_time(self, t)
eta = tnci.get_val(self, (lat, lon))
Here -90<lat<90 and 0<lon<360. Finally, the long period (longer than a year) tidal constituents can be excluded via:
tnci = uptide.FES2014TidalInterpolator(..., include_long_period=False)"""
def __init__(self, tide_or_fes_ini_file,
fes_data_path=None, include_long_period=True):
# only import here to avoid hard dependency on fes
# there are two versions of this, the old (pre 2.9.1) is imported as fes,
# but from 2.9.1 we need to `import pyfes`
try:
import fes
except ImportError:
try:
import pyfes as fes
except ImportError:
raise ImportError("Failed to import fes. See https://github.com/stephankramer/uptide for installation instructions.")
if isinstance(tide_or_fes_ini_file, Tides):
self.set_initial_time(tide_or_fes_ini_file.datetime0)
with temporary_fes_ini_file(tide_or_fes_ini_file, fes_data_path) as f:
self.fh = fes.Handler("ocean", "io", f.name)
else:
assert fes_data_path is None, "Do not provide fes_data_path if fes_ini_file is specified"
self.fh = fes.Handler("ocean", "io", tide_or_fes_ini_file)
self.include_long_period = include_long_period
def set_time(self, t):
"""Set time (in seconds) at which to reconstruct tide
        Time is measured with respect to the datetime set via the set_initial_time() method on the FES2014TidalInterpolator itself
or the initial Tides object provided."""
self.current_datetime = self.datetime0 + datetime.timedelta(seconds=t)
def get_val(self, x):
"""Evaluate tide in location x=(lat, lon)
Here -90<lat<90 and 0<lon<360."""
try:
# old API:
st, lt = self.fh.scalar(x[0], x[1], self.current_datetime)
except AttributeError:
# new API
st, lt, fes_min = self.fh.calculate(numpy.array([x[1]]), numpy.array([x[0]]), numpy.array([self.current_datetime]))
# FES2014 is in cm, others are all in m
if self.include_long_period:
return (st+lt) * 0.01
else:
return st * 0.01
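# Minimal end-to-end sketch, assuming the FES2014 netcdf files have been
# extracted to a local directory (the path is hypothetical):
#
#   tide = Tides(['M2', 'S2'])
#   tide.set_initial_time(datetime.datetime(2005, 3, 1, 0, 0))
#   tnci = FES2014TidalInterpolator(tide, '/data/fes2014/ocean_tide/')
#   tnci.set_time(3600.0)              # one hour past datetime0, in seconds
#   eta = tnci.get_val((55.0, 358.0))  # (lat, lon), 0 < lon < 360; metres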
|
stephankramer/uptide
|
uptide/fes_interpolator.py
|
Python
|
lgpl-3.0
| 6,046
|
[
"NetCDF"
] |
b94fa3087228c08d34490375cf0c62c4d4acce5e9d1fa90798131c8b0fb8fa53
|
"""function y = ctemh_cryoFrank(k,params)
% from Kirkland, adapted for cryo (EMAN1) by P. Schwander
% Version V 1.1
% Copyright (c) UWM, Peter Schwander 2010 MATLAB version
% '''
% Copyright (c) Columbia University Hstau Liao 2018 (python version)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
% Here, the damping envelope is characterized by a single parameter B
% see J. Frank
% params(1) Cs in mm
% params(2) df in Angstrom, a positive value is underfocus
% params(3) Electron energy in keV
% params(4) B in A-2
% Note: we assume |k| = s
"""
import sys
import numpy as np
import math
def op(k,params):
Cs = params[0]*1.0e7
df = params[1]
kev = params[2]
B = params[3]
ampc = params[4]
mo = 511.0
hc = 12.3986
wav = (2*mo)+kev
wav = hc/np.sqrt(wav*kev)
w1 = np.pi*Cs*wav*wav*wav
w2 = np.pi*wav*df
k2 = k*k
    #wi = exp(-2*B*k2); % B. Sander et al. / Journal of Structural Biology 142 (2003) 392-401, CHECKCHECK
sigm = B/math.sqrt(2*math.log(2)) # B is Gaussian Env. Halfwidth
#sigm = B/2;
wi = np.exp(-k2/(2*sigm**2))
wr = (0.5*w1*k2-w2)*k2 # gam = (pi/2)Cs lam^3 k^4 - pi lam df k^2
y = (np.sin(wr)-ampc*np.cos(wr))*wi
return y
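# Example evaluation (parameter values are arbitrary placeholders chosen for
# demonstration, not recommended imaging settings):
#
#   k = np.linspace(0.0, 0.25, 512)  # spatial frequencies |k| = s in 1/A
#   y = op(k, [2.0, 15000.0, 300.0, 0.2, 0.07])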
if __name__ == '__main__':
    # argv values arrive as strings; convert them before calling op.
    # (Assumes the five params are passed as one comma-separated argument,
    # e.g. "2.0,15000,300,0.2,0.07" -- an illustrative convention, not from
    # the original script.)
    k = float(sys.argv[1])
    params = [float(p) for p in sys.argv[2].split(',')]
    result = op(k, params)
|
hstau/covar-cryo
|
covariance/ctemh_cryoFrank.py
|
Python
|
gpl-2.0
| 1,344
|
[
"Gaussian"
] |
c599fd237c6a7edb13758383b325815284fb73bb935df6c97fc689216fbbb898
|