| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities.
Authors
-------
* Fernando Perez.
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'osx': 'MacOSX',
'inline' : 'module://IPython.kernel.zmq.pylab.backend_inline'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues processing the remaining figures.
Parameters
----------
fig_nums : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
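# A minimal usage sketch (figure numbers are hypothetical):
# getfigs()       # -> all open Figure objects
# getfigs(1, 2)   # -> figures 1 and 2, warning about any missing number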
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy-to-remember convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png'):
"""Convert a figure to svg or png for inline display."""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
fc = fig.get_facecolor()
ec = fig.get_edgecolor()
bytes_io = BytesIO()
dpi = rcParams['savefig.dpi']
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
fig.canvas.print_figure(bytes_io, format=fmt, bbox_inches='tight',
facecolor=fc, edgecolor=ec, dpi=dpi)
data = bytes_io.getvalue()
return data
def retina_figure(fig):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina')
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pylab as pylab
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if pylab.draw_if_interactive.called:
pylab.draw()
pylab.draw_if_interactive.called = False
return mpl_execfile
def select_figure_format(shell, fmt):
"""Select figure format for inline backend, can be 'png', 'retina', or 'svg'.
Using this method ensures only one figure format is active at a time.
"""
from matplotlib.figure import Figure
from IPython.kernel.zmq.pylab import backend_inline
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
if fmt == 'png':
svg_formatter.type_printers.pop(Figure, None)
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png'))
elif fmt in ('png2x', 'retina'):
svg_formatter.type_printers.pop(Figure, None)
png_formatter.for_type(Figure, retina_figure)
elif fmt == 'svg':
png_formatter.type_printers.pop(Figure, None)
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg'))
else:
raise ValueError("supported formats are: 'png', 'retina', 'svg', not %r" % fmt)
# set the format to be used in the backend()
backend_inline._figure_format = fmt
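# A minimal usage sketch (assumes `shell` is the running InteractiveShell):
# select_figure_format(shell, 'png2x')  # 'png2x' and 'retina' are equivalent here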
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.kernel.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If a gui was already selected by the shell, only that gui (and 'inline')
# remain valid, so fall back to the earlier selection on a mismatch.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
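# Worked examples, grounded in the `backends` dict above:
# find_gui_and_backend('qt')     -> ('qt', 'Qt4Agg')
# find_gui_and_backend('inline') -> ('inline',
#     'module://IPython.kernel.zmq.pylab.backend_inline')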
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec s in user_ns
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec s in user_ns
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from IPython.kernel.zmq.pylab.backend_inline import InlineBackend
except ImportError:
return
from matplotlib import pyplot
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
shell.register_post_execute(flush_figures)
# Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = pyplot.rcParams[k]
# load inline_rc
pyplot.rcParams.update(cfg.rc)
else:
from IPython.kernel.zmq.pylab.backend_inline import flush_figures
if flush_figures in shell._post_execute:
shell._post_execute.pop(flush_figures)
if hasattr(shell, '_saved_rcParams'):
pyplot.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
# Setup the default figure format
select_figure_format(shell, cfg.figure_format)
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/pylabtools.py | Python | apache-2.0 | 11,701 | ["Brian"] | 4041460e4d9de2d6cf63869bdb2c0b3894e0f7acea7a15f12ea59d6ffbbae076 |
import pytest
from capybara.node.element import Element
from capybara.tests.helpers import isselenium
@pytest.mark.requires("js")
class TestEvaluateScript:
def test_evaluates_the_given_script_and_returns_whatever_it_produces(self, session):
session.visit("/with_js")
assert session.evaluate_script("1+3") == 4
def test_ignores_leading_whitespace(self, session):
session.visit("/with_js")
assert session.evaluate_script("""
1 + 3
""") == 4
def test_passes_arguments_to_the_script(self, session):
session.visit("/with_js")
session.evaluate_script("document.getElementById('change').textContent = arguments[0]", "Doodle Funk")
assert session.has_css("#change", text="Doodle Funk")
def test_supports_passing_elements_as_arguments_to_the_script(self, session):
session.visit("/with_js")
el = session.find("css", "#change")
session.evaluate_script("arguments[1].textContent = arguments[0]", "Doodle Funk", el)
assert session.has_css("#change", text="Doodle Funk")
def test_supports_returning_elements(self, session):
session.visit("/with_js")
el = session.evaluate_script("document.getElementById('change')")
assert isinstance(el, Element)
assert el == session.find("css", "#change")
def test_supports_returning_arrays_of_elements(self, session):
session.visit("/form")
elements = session.evaluate_script("document.querySelectorAll('#form_city option')")
for element in elements:
assert isinstance(element, Element)
assert elements == list(session.find("css", "#form_city").find_all("css", "option"))
def test_supports_returning_dicts_with_elements(self, session):
if isselenium(session):
pytest.importorskip("selenium", minversion="3.4.3")
session.visit("/form")
result = session.evaluate_script(
"{a: document.getElementById('form_title'), "
"b: {c: document.querySelectorAll('#form_city option')}}")
assert result == {
'a': session.find("id", "form_title"),
'b': {
'c': list(session.find("css", "#form_city").find_all("css", "option"))}}
| elliterate/capybara.py | capybara/tests/session/test_evaluate_script.py | Python | mit | 2,259 | ["VisIt"] | 11f89fd04c3d0ab1a5433bb2affec72c1fde1cbe942d0eef5237bbe89a3569e9 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Development script to test the algorithms of all the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from math import factorial
import itertools
from random import shuffle
if __name__ == '__main__':
allcg = AllCoordinationGeometries()
test = raw_input('Standard ("s", all permutations for cn <= 6, 500 random permutations for cn > 6), on demand ("o"), or a number of random permutations: ')
if test == 's':
perms_def = 'standard'
elif test == 'o':
perms_def = 'on_demand'
else:
try:
nperms = int(test)
perms_def = 'ndefined'
except ValueError:
perms_def = 'on_demand'
for coordination in range(1, 13):
print('IN COORDINATION {:d}'.format(coordination))
symbol_name_mapping = allcg.get_symbol_name_mapping(coordination=coordination)
if perms_def == 'standard':
if coordination > 6:
test = '500'
else:
test = 'all'
elif perms_def == 'ndefined':
test = nperms
else:
test = raw_input('Enter if you want to test all possible permutations ("all" or "a") or a given number of random permutations (e.g. "25")')
myindices = range(coordination)
if test == 'all' or test == 'a':
perms_type = 'all'
perms_iterator = itertools.permutations(myindices)
nperms = factorial(coordination)
else:
perms_type = 'explicit'
try:
nperms = int(test)
except ValueError:
raise ValueError('Could not turn {} into integer ...'.format(test))
perms_iterator = []
for ii in range(nperms):
shuffle(myindices)
perms_iterator.append(list(myindices))
for cg_symbol, cg_name in symbol_name_mapping.items():
cg = allcg[cg_symbol]
if cg.deactivate:
continue
print('Testing {} ({})'.format(cg_symbol, cg_name))
cg = allcg[cg_symbol]
if cg.points is None:
continue
lgf = LocalGeometryFinder()
lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
# Reinitialize the itertools permutations
if perms_type == 'all':
perms_iterator = itertools.permutations(myindices)
# Loop over the permutations
iperm = 1
for indices_perm in perms_iterator:
lgf.setup_test_perfect_environment(cg_symbol, indices=indices_perm,
randomness=True, max_random_dist=0.1,
random_translation=True, random_rotation=True, random_scale=True)
lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
print('Perm # {:d}/{:d} : '.format(iperm, nperms), indices_perm)
algos_results = []
for algo in cg.algorithms:
if algo.algorithm_type == 'EXPLICIT_PERMUTATIONS':
results = lgf.coordination_geometry_symmetry_measures(coordination_geometry=cg,
points_perfect=points_perfect)
# raise ValueError('Do something for the explicit ones ... (these should anyway be by far ok!)')
else:
results = lgf.coordination_geometry_symmetry_measures_separation_plane(coordination_geometry=cg,
separation_plane_algo=algo,
points_perfect=points_perfect)
algos_results.append(min(results[0]))
if not min(results[0]) < 1.5:
print('Following is not close to 0.0 ...')
raw_input(results)
print(' => ', algos_results)
iperm += 1
| matk86/pymatgen | dev_scripts/chemenv/test_algos_all_geoms.py | Python | mit | 4,859 | ["pymatgen"] | 67f97cfec49d8301d986376d216f8591501d44842b89486d88bfa2a853aa0533 |
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
from neon import NervanaObject
class Initializer(NervanaObject):
"""
Abstract base class from which parameter tensor initializers inherit.
"""
def fill(self, param):
raise NotImplementedError()
class Constant(Initializer):
"""
A class for initializing parameter tensors with a single value.
Args:
val (float, optional): The value to assign to all tensor elements
"""
def __init__(self, val=0.0, name="constantInit"):
super(Constant, self).__init__(name=name)
self.val = val
def fill(self, param):
param[:] = self.val
class Uniform(Initializer):
"""
A class for initializing parameter tensors with values drawn from
a uniform distribution.
Args:
low (float, optional): Lower bound of range from which we draw values.
high (float, optional): Upper bound of range from which we draw values.
"""
def __init__(self, low=0.0, high=1.0, name="uniformInit"):
super(Uniform, self).__init__(name=name)
self.low, self.high = (low, high)
def fill(self, param):
param[:] = self.be.rng.uniform(self.low, self.high, param.shape)
class Gaussian(Initializer):
"""
A class for initializing parameter tensors with values drawn from
a normal distribution.
Args:
loc (float, optional): The mean of the normal (mu).
scale (float, optional): The standard deviation of the normal (sigma).
"""
def __init__(self, loc=0.0, scale=1.0, name="gaussianInit"):
super(Gaussian, self).__init__(name=name)
self.loc, self.scale = (loc, scale)
def fill(self, param):
param[:] = self.be.rng.normal(self.loc, self.scale, param.shape)
class GlorotUniform(Initializer):
"""
A class for initializing parameter tensors with values drawn from
a uniform distribution over a region whose bounds have been determined
using the policy described in:
"Understanding the difficulty of training deep feedforward neural networks"
(http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
We normalize the range by the scaled average of the input dimension
and the output dimension of the tensor in question.
"""
def __init__(self, name="autouniformInit"):
super(GlorotUniform, self).__init__(name=name)
def fill(self, param):
k = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
param[:] = self.be.rng.uniform(-k, k, param.shape)
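# Worked example for GlorotUniform: a parameter of shape (100, 200) gives
# k = sqrt(6 / (100 + 200)) ~= 0.141, so values are drawn uniformly from
# [-0.141, 0.141].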
class Xavier(Initializer):
"""
Alternate form of Glorot where only input nodes are used for scaling range.
Args:
local (bool, optional): Whether the layer type is local (Convolutional) or not.
default is True.
"""
def __init__(self, local=True, name="xavier"):
super(Xavier, self).__init__(name=name)
self.local = local
def fill(self, param):
fan_in = param.shape[0 if self.local else 1]
scale = np.sqrt(3./fan_in)
param[:] = self.be.rng.uniform(-scale, scale, param.shape)
class Orthonormal(Initializer):
"""
Implementation taken from Lasagne. Reference: Saxe et al., http://arxiv.org/abs/1312.6120
"""
def __init__(self, scale=1.1, name="orthonormal"):
super(Orthonormal, self).__init__(name=name)
self.scale = scale
def fill(self, param):
a = np.random.normal(0.0, 1.0, param.shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == param.shape else v
param[:] = self.scale * q
| nhynes/neon | neon/initializers/initializer.py | Python | apache-2.0 | 4,381 | ["Gaussian"] | ac1bca0eaffd7b0945ab03998b8e71fd6a4c7cccff2b8254815b2cc723642edf |
|
# -*- coding: utf-8 -*-
"""
Introduction to different stimulus types in Python.
* different stimulus types and properties
* cross-monitor consistent stimulus size in cm and degrees
* within-monitor equal stimulus luminosities using the DKL colorspace
Jonas Kristoffer Lindeløv, 2014. Revised 2015.
"""
# -----------------------------
# STIMULUS TYPES AND PROPERTIES
# -----------------------------
# Generic layout
from psychopy import visual, event
win = visual.Window()
stim_text = visual.TextStim(win, text='Welcome', color='black')
stim_text.draw()
win.flip()
event.waitKeys()
"""
Exercise on changing parameters:
* change background color of the Window to black. And don't do fullscreen.
Hint: if you want to do this while running the experiment, you can either:
(1) change win.color and call win.flip() twice for it to take effect.
(2) close the current window using win.close() and create a new one.
* change the height and orientation of the TextStim.
* event.waitKeys() waits infinitely. Make it wait for a maximum of 5 seconds
and only react to the keys ['space', 'n', 'm'].
"""
# SOLUTION:
win.color = 'black'
win.flip()
win.flip()
event.waitKeys()
stim_text = visual.TextStim(win, text='Welcome', color='gray', height=0.2, ori=30)
stim_text.draw()
win.flip()
event.waitKeys(maxWait=5, keyList=['space', 'n', 'm'])
# ImageStim
stim_image = visual.ImageStim(win, image='xkcd.png')
stim_image.size *= [1, -1] # flip vertically
print stim_image.size # the size of the stimulus in current units
stim_text.pos = [0, 0.6]
stim_image.draw()
stim_text.draw()
win.flip()
event.waitKeys()
"""
Exercise on GratingStim, parameters and drawing:
* make a GratingStim (call it e.g. stim_grating) and show it to see what it does
* change various parameters after it is initiated,
e.g. make a gabor patch by setting mask to a gaussian and spatial
frequency (sf) to 10.
* show it on top of stim_image and stim_text with reduced opacity
hint: to draw on top, simply draw last.
* pros: try animating stuff by wrapping draw(), flip() and a change of
stimulus attributes in a loop, e.g. with the following per-frame change:
stim_image.ori += 0.1 # change stim, attribute and value
if event.getKeys(): break # to end on a keypress
"""
# SOLUTION
stim_grating = visual.GratingStim(win)
stim_grating.mask = 'gauss'
stim_grating.sf = 10
stim_grating.pos = [-0.1, -0.1]
stim_grating.opacity = 0.5
stim_image.draw()
stim_text.draw()
stim_grating.draw()
win.flip()
event.waitKeys()
# Animate
while True:
stim_image.ori += 0.5
stim_grating.size += 0.01
stim_grating.sf *= 1.005
stim_image.draw()
stim_grating.draw()
win.flip()
if event.getKeys(): break # to end on a keypress
# SHOW SLIDES HERE
# -----------------------------
# STIMULUS SIZE
# Handling actual stimulus size for consistent presentation across monitors!
# -----------------------------
# Demonstrate monitor center!
from psychopy import visual, monitors
my_monitor = monitors.Monitor('testMonitor', width=34.3, distance=65)
my_monitor.setSizePix([1024, 768])
# Start a new window with degrees as default unit
win.close()
win = visual.Window(monitor=my_monitor, color='black')
# In cm
stim_image = visual.ImageStim(win, image='xkcd.png', size=[10, 10], units='cm')
stim_image.draw()
win.flip()
event.waitKeys()
# in degrees
stim_image.units = 'deg'
stim_image.draw()
win.flip()
event.waitKeys()
"""
Exercise on visual size precision:
* Specify your own monitor dimensions in monitor center (using Coder or Builder)
* Specify your own monitor dimensions in code
* change the size of the image, e.g. to [3, 6] and verify that it actually
is the expected size.
* Draw some different kinds of stimuli using cm as units and verify size.
* Some stimuli aren't the actual expected size. Adjust the parameters
so that they are the size that you want.
* Pro: draw different kinds of stimuli using deg and verify whether they
have the expected size. (hint: math.tan, math.radians)
* Pro: textStims are smaller than the expected size. The relationship is
however approximately linear. Determine the linear function that makes
the actual size correspond to the specified size. Do you think it would
be general across monitors? And/or general across fonts?
"""
# Checking size in degrees. size = tan(angle_in_radians) * distance
import ppc
print ppc.deg2cm(10, 65)
print ppc.deg2cm(3, 60)
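# Worked check of the formula above (ppc.deg2cm is a course helper, so the
# exact output is assumed): tan(radians(10)) * 65 ~= 11.46 cm and
# tan(radians(3)) * 60 ~= 3.14 cm.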
# ----------------------
# EQUILUMINANT STIMULI
# ... especially if you calibrate your monitor and get a conversion matrix
# ----------------------
# Specify colors using the DKL colorspace
# It is DKL = [luminance, hue, saturation] where
# luminance is degrees -90 to 90
# hue is degrees 0 to 360
# saturation is eccentricity 0 to 1
stim_shape1 = visual.ShapeStim(win, units='cm', size=5,
fillColorSpace='dkl', fillColor=[0, 45, 1], pos=[0, 2]) # strong, medium luminance
stim_shape2 = visual.ShapeStim(win, units='cm', size=5,
fillColorSpace='dkl', fillColor=[0, 215, 0.3], pos=[0, -2], # should be a pale isoluminant contrast color
vertices=[[0,0], [1,0], [1,1], [0,1]])
stim_shape2.vertices -= [0.5, 0.5]
stim_shape1.draw()
stim_shape2.draw()
win.flip()
event.waitKeys()
# Convert them to "normal" RGB just to see
print ppc.dkl2rgb([0, 45, 1]) # first parameter should be the same
print ppc.dkl2rgb([0, 0, -1]) # first parameter should be the same
"""
Exercise on colors:
* make a ShapeStim and change its line and fill colors to something in DKL.
hint: ShapeStim has the attributes fillColor and lineColor.
* try changing the color of the stim_image from earlier.
"""
# ----------------------
# SOUNDS
# ... are continuous so now we can use core.wait().
# ----------------------
# Psychopy sounds
from psychopy import sound, core
clock = core.Clock()
sound_pygame = sound.SoundPygame('beep.wav', secs=0.1)
sound = sound.Sound('beep.wav', secs=0.1)
sound.play()
core.wait(0.5)
#playing
sound_pygame.play()
core.wait(0.5)
# winsound
sound_winsound = ppc.Sound('beep.wav')
sound_winsound.play()
core.wait(0.5)
"""
Exercise:
* sound.Sound can generate sounds. Try setting value to either 440 and 'C'.
* Look at other parameters such as octave, secs, and volume.
* Extreme pros: create an Nx2 numpy array containing the actual audio
samples and play it!
"""
| lindeloev/psychopy-course | ppc2_stimuli.py | Python | gpl-2.0 | 6,668 | ["Gaussian"] | 48810288013bb95581f21ca1157d9f8690b3b3dcf98c4dcd70626e90451d3edc |
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Revisions Copyright 2007 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the prosite dat file from
Prosite.
http://www.expasy.ch/prosite/
Tested with:
Release 15.0, July 1998
Release 16.0, July 1999
Release 17.0, Dec 2001
Release 19.0, Mar 2006
Classes:
Record Holds Prosite data.
PatternHit Holds data from a hit against a Prosite pattern.
Iterator Iterates over entries in a Prosite file.
Dictionary Accesses a Prosite file using a dictionary interface.
RecordParser Parses a Prosite record into a Record object.
_Scanner Scans Prosite-formatted data.
_RecordConsumer Consumes Prosite data to a Record object.
Functions:
scan_sequence_expasy Scan a sequence for occurrences of Prosite patterns.
index_file Index a Prosite file for a Dictionary.
_extract_record Extract Prosite data from a web page.
_extract_pattern_hits Extract Prosite patterns from a web page.
"""
from types import *
import os
import re
import sgmllib
from Bio import File
from Bio import Index
from Bio.ParserSupport import *
# There is probably a cleaner way to write the read/parse functions
# if we don't use the "parser = RecordParser(); parser.parse(handle)"
# approach. Leaving that for the next revision of Bio.Prosite.
def parse(handle):
import cStringIO
parser = RecordParser()
text = ""
for line in handle:
text += line
if line[:2]=='//':
handle = cStringIO.StringIO(text)
record = parser.parse(handle)
text = ""
if not record: # Then this was the copyright notice
continue
yield record
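# A minimal usage sketch (assumes a local copy of the Prosite dat file):
# for record in parse(open("prosite.dat")):
#     print record.accession, record.name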
def read(handle):
parser = RecordParser()
try:
record = parser.parse(handle)
except ValueError, error:
if error.message=="There doesn't appear to be a record":
raise ValueError, "No Prosite record found"
else:
raise error
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError, "More than one Prosite record found"
return record
class Record:
"""Holds information from a Prosite record.
Members:
name ID of the record. e.g. ADH_ZINC
type Type of entry. e.g. PATTERN, MATRIX, or RULE
accession e.g. PS00387
created Date the entry was created. (MMM-YYYY)
data_update Date the 'primary' data was last updated.
info_update Date data other than 'primary' data was last updated.
pdoc ID of the PROSITE DOCumentation.
description Free-format description.
pattern The PROSITE pattern. See docs.
matrix List of strings that describes a matrix entry.
rules List of rule definitions (from RU lines). (strings)
prorules List of prorules (from PR lines). (strings)
NUMERICAL RESULTS
nr_sp_release SwissProt release.
nr_sp_seqs Number of seqs in that release of Swiss-Prot. (int)
nr_total Number of hits in Swiss-Prot. tuple of (hits, seqs)
nr_positive True positives. tuple of (hits, seqs)
nr_unknown Could be positives. tuple of (hits, seqs)
nr_false_pos False positives. tuple of (hits, seqs)
nr_false_neg False negatives. (int)
nr_partial False negatives, because they are fragments. (int)
COMMENTS
cc_taxo_range Taxonomic range. See docs for format
cc_max_repeat Maximum number of repetitions in a protein
cc_site Interesting site. list of tuples (pattern pos, desc.)
cc_skip_flag Can this entry be ignored?
cc_matrix_type
cc_scaling_db
cc_author
cc_ft_key
cc_ft_desc
cc_version version number (introduced in release 19.0)
DATA BANK REFERENCES - The following are all
lists of tuples (swiss-prot accession,
swiss-prot name)
dr_positive
dr_false_neg
dr_false_pos
dr_potential Potential hits, but fingerprint region not yet available.
dr_unknown Could possibly belong
pdb_structs List of PDB entries.
"""
def __init__(self):
self.name = ''
self.type = ''
self.accession = ''
self.created = ''
self.data_update = ''
self.info_update = ''
self.pdoc = ''
self.description = ''
self.pattern = ''
self.matrix = []
self.rules = []
self.prorules = []
self.postprocessing = []
self.nr_sp_release = ''
self.nr_sp_seqs = ''
self.nr_total = (None, None)
self.nr_positive = (None, None)
self.nr_unknown = (None, None)
self.nr_false_pos = (None, None)
self.nr_false_neg = None
self.nr_partial = None
self.cc_taxo_range = ''
self.cc_max_repeat = ''
self.cc_site = []
self.cc_skip_flag = ''
self.dr_positive = []
self.dr_false_neg = []
self.dr_false_pos = []
self.dr_potential = []
self.dr_unknown = []
self.pdb_structs = []
class PatternHit:
"""Holds information from a hit against a Prosite pattern.
Members:
name ID of the record. e.g. ADH_ZINC
accession e.g. PS00387
pdoc ID of the PROSITE DOCumentation.
description Free-format description.
matches List of tuples (start, end, sequence) where
start and end are indexes of the match, and sequence is
the sequence matched.
"""
def __init__(self):
self.name = None
self.accession = None
self.pdoc = None
self.description = None
self.matches = []
def __str__(self):
lines = []
lines.append("%s %s %s" % (self.accession, self.pdoc, self.name))
lines.append(self.description)
lines.append('')
if len(self.matches) > 1:
lines.append("Number of matches: %s" % len(self.matches))
for i in range(len(self.matches)):
start, end, seq = self.matches[i]
range_str = "%d-%d" % (start, end)
if len(self.matches) > 1:
lines.append("%7d %10s %s" % (i+1, range_str, seq))
else:
lines.append("%7s %10s %s" % (' ', range_str, seq))
return "\n".join(lines)
class Iterator:
"""Returns one record at a time from a Prosite file.
Methods:
next Return the next record from the stream, or None.
"""
def __init__(self, handle, parser=None):
"""__init__(self, handle, parser=None)
Create a new iterator. handle is a file-like object. parser
is an optional Parser object to change the results into another form.
If set to None, then the raw contents of the file will be returned.
"""
if type(handle) is not FileType and type(handle) is not InstanceType:
raise ValueError, "I expected a file handle or file-like object"
self._uhandle = File.UndoHandle(handle)
self._parser = parser
def next(self):
"""next(self) -> object
Return the next Prosite record from the file. If no more records,
return None.
"""
# Skip the copyright info, if it's the first record.
line = self._uhandle.peekline()
if line[:2] == 'CC':
while 1:
line = self._uhandle.readline()
if not line:
break
if line[:2] == '//':
break
if line[:2] != 'CC':
raise ValueError, \
"Oops, where's the copyright?"
lines = []
while 1:
line = self._uhandle.readline()
if not line:
break
lines.append(line)
if line[:2] == '//':
break
if not lines:
return None
data = "".join(lines)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __iter__(self):
return iter(self.next, None)
class Dictionary:
"""Accesses a Prosite file using a dictionary interface.
"""
__filename_key = '__filename'
def __init__(self, indexname, parser=None):
"""__init__(self, indexname, parser=None)
Open a Prosite Dictionary. indexname is the name of the
index for the dictionary. The index should have been created
using the index_file function. parser is an optional Parser
object to change the results into another form. If set to None,
then the raw contents of the file will be returned.
"""
self._index = Index.Index(indexname)
self._handle = open(self._index[Dictionary.__filename_key])
self._parser = parser
def __len__(self):
return len(self._index)
def __getitem__(self, key):
start, len = self._index[key]
self._handle.seek(start)
data = self._handle.read(len)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data
def __getattr__(self, name):
return getattr(self._index, name)
class ExPASyDictionary:
"""Access PROSITE at ExPASy using a read-only dictionary interface.
"""
def __init__(self, delay=5.0, parser=None):
"""__init__(self, delay=5.0, parser=None)
Create a new Dictionary to access PROSITE. parser is an optional
parser (e.g. Prosite.RecordParser) object to change the results
into another form. If set to None, then the raw contents of the
file will be returned. delay is the number of seconds to wait
between each query.
"""
import warnings
from Bio.WWW import RequestLimiter
warnings.warn("Bio.Prosite.ExPASyDictionary is deprecated. Please use the function Bio.ExPASy.get_prosite_raw instead.",
DeprecationWarning)
self.parser = parser
self.limiter = RequestLimiter(delay)
def __len__(self):
raise NotImplementedError, "Prosite contains lots of entries"
def clear(self):
raise NotImplementedError, "This is a read-only dictionary"
def __setitem__(self, key, item):
raise NotImplementedError, "This is a read-only dictionary"
def update(self):
raise NotImplementedError, "This is a read-only dictionary"
def copy(self):
raise NotImplementedError, "You don't need to do this..."
def keys(self):
raise NotImplementedError, "You don't really want to do this..."
def items(self):
raise NotImplementedError, "You don't really want to do this..."
def values(self):
raise NotImplementedError, "You don't really want to do this..."
def has_key(self, id):
"""has_key(self, id) -> bool"""
try:
self[id]
except KeyError:
return 0
return 1
def get(self, id, failobj=None):
try:
return self[id]
except KeyError:
return failobj
raise "How did I get here?"
def __getitem__(self, id):
"""__getitem__(self, id) -> object
Return a Prosite entry. id is either the id or accession
for the entry. Raises a KeyError if there's an error.
"""
from Bio.WWW import ExPASy
# First, check to see if enough time has passed since my
# last query.
self.limiter.wait()
try:
handle = ExPASy.get_prosite_entry(id)
except IOError:
raise KeyError, id
try:
handle = File.StringHandle(_extract_record(handle))
except ValueError:
raise KeyError, id
if self.parser is not None:
return self.parser.parse(handle)
return handle.read()
class RecordParser(AbstractParser):
"""Parses Prosite data into a Record object.
"""
def __init__(self):
self._scanner = _Scanner()
self._consumer = _RecordConsumer()
def parse(self, handle):
self._scanner.feed(handle, self._consumer)
return self._consumer.data
class _Scanner:
"""Scans Prosite-formatted data.
Tested with:
Release 15.0, July 1998
"""
def feed(self, handle, consumer):
"""feed(self, handle, consumer)
Feed in Prosite data for scanning. handle is a file-like
object that contains prosite data. consumer is a
Consumer object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
consumer.finished = False
while not consumer.finished:
line = uhandle.peekline()
if not line:
break
elif is_blank_line(line):
# Skip blank lines between records
uhandle.readline()
continue
elif line[:2] == 'ID':
self._scan_record(uhandle, consumer)
elif line[:2] == 'CC':
self._scan_copyrights(uhandle, consumer)
else:
raise ValueError, "There doesn't appear to be a record"
def _scan_copyrights(self, uhandle, consumer):
consumer.start_copyrights()
self._scan_line('CC', uhandle, consumer.copyright, any_number=1)
self._scan_terminator(uhandle, consumer)
consumer.end_copyrights()
def _scan_record(self, uhandle, consumer):
consumer.start_record()
for fn in self._scan_fns:
fn(self, uhandle, consumer)
# In Release 15.0, C_TYPE_LECTIN_1 has the DO line before
# the 3D lines, instead of the other way around.
# Thus, I'll give the 3D lines another chance after the DO lines
# are finished.
if fn is self._scan_do.im_func:
self._scan_3d(uhandle, consumer)
consumer.end_record()
def _scan_line(self, line_type, uhandle, event_fn,
exactly_one=None, one_or_more=None, any_number=None,
up_to_one=None):
# Callers must set exactly one of exactly_one, one_or_more, or
# any_number to a true value. I do not explicitly check to
# make sure this function is called correctly.
# This does not guarantee any parameter safety, but I
# like the readability. The other strategy I tried was have
# parameters min_lines, max_lines.
if exactly_one or one_or_more:
read_and_call(uhandle, event_fn, start=line_type)
if one_or_more or any_number:
while 1:
if not attempt_read_and_call(uhandle, event_fn,
start=line_type):
break
if up_to_one:
attempt_read_and_call(uhandle, event_fn, start=line_type)
def _scan_id(self, uhandle, consumer):
self._scan_line('ID', uhandle, consumer.identification, exactly_one=1)
def _scan_ac(self, uhandle, consumer):
self._scan_line('AC', uhandle, consumer.accession, exactly_one=1)
def _scan_dt(self, uhandle, consumer):
self._scan_line('DT', uhandle, consumer.date, exactly_one=1)
def _scan_de(self, uhandle, consumer):
self._scan_line('DE', uhandle, consumer.description, exactly_one=1)
def _scan_pa(self, uhandle, consumer):
self._scan_line('PA', uhandle, consumer.pattern, any_number=1)
def _scan_ma(self, uhandle, consumer):
self._scan_line('MA', uhandle, consumer.matrix, any_number=1)
## # ZN2_CY6_FUNGAL_2, DNAJ_2 in Release 15
## # contain a CC line buried within an 'MA' line. Need to check
## # for that.
## while 1:
## if not attempt_read_and_call(uhandle, consumer.matrix, start='MA'):
## line1 = uhandle.readline()
## line2 = uhandle.readline()
## uhandle.saveline(line2)
## uhandle.saveline(line1)
## if line1[:2] == 'CC' and line2[:2] == 'MA':
## read_and_call(uhandle, consumer.comment, start='CC')
## else:
## break
def _scan_pp(self, uhandle, consumer):
#New PP line, PostProcessing, just after the MA line
self._scan_line('PP', uhandle, consumer.postprocessing, any_number=1)
def _scan_ru(self, uhandle, consumer):
self._scan_line('RU', uhandle, consumer.rule, any_number=1)
def _scan_nr(self, uhandle, consumer):
self._scan_line('NR', uhandle, consumer.numerical_results,
any_number=1)
def _scan_cc(self, uhandle, consumer):
self._scan_line('CC', uhandle, consumer.comment, any_number=1)
def _scan_dr(self, uhandle, consumer):
self._scan_line('DR', uhandle, consumer.database_reference,
any_number=1)
def _scan_3d(self, uhandle, consumer):
self._scan_line('3D', uhandle, consumer.pdb_reference,
any_number=1)
def _scan_pr(self, uhandle, consumer):
#New PR line, ProRule, between 3D and DO lines
self._scan_line('PR', uhandle, consumer.prorule, any_number=1)
def _scan_do(self, uhandle, consumer):
self._scan_line('DO', uhandle, consumer.documentation, exactly_one=1)
def _scan_terminator(self, uhandle, consumer):
self._scan_line('//', uhandle, consumer.terminator, exactly_one=1)
# This is a list of scan functions in the order expected in the file.
# The function definitions define how many times each line type is expected
# (or if optional):
_scan_fns = [
_scan_id,
_scan_ac,
_scan_dt,
_scan_de,
_scan_pa,
_scan_ma,
_scan_pp,
_scan_ru,
_scan_nr,
_scan_cc,
# This is a really dirty hack, and should be fixed properly at
# some point. ZN2_CY6_FUNGAL_2, DNAJ_2 in Rel 15 and PS50309
# in Rel 17 have lines out of order. Thus, I have to rescan
# these, which decreases performance.
_scan_ma,
_scan_nr,
_scan_cc,
_scan_dr,
_scan_3d,
_scan_pr,
_scan_do,
_scan_terminator
]
class _RecordConsumer(AbstractConsumer):
"""Consumer that converts a Prosite record to a Record object.
Members:
data Record with Prosite data.
"""
def __init__(self):
self.data = None
def start_record(self):
self.data = Record()
def end_record(self):
self._clean_record(self.data)
def identification(self, line):
cols = line.split()
if len(cols) != 3:
raise ValueError, "I don't understand identification line\n%s" % \
line
self.data.name = self._chomp(cols[1]) # don't want ';'
self.data.type = self._chomp(cols[2]) # don't want '.'
def accession(self, line):
cols = line.split()
if len(cols) != 2:
raise ValueError, "I don't understand accession line\n%s" % line
self.data.accession = self._chomp(cols[1])
def date(self, line):
uprline = line.upper()
cols = uprline.split()
# Release 15.0 contains both 'INFO UPDATE' and 'INF UPDATE'
if cols[2] != '(CREATED);' or \
cols[4] != '(DATA' or cols[5] != 'UPDATE);' or \
cols[7][:4] != '(INF' or cols[8] != 'UPDATE).':
raise ValueError, "I don't understand date line\n%s" % line
self.data.created = cols[1]
self.data.data_update = cols[3]
self.data.info_update = cols[6]
def description(self, line):
self.data.description = self._clean(line)
def pattern(self, line):
self.data.pattern = self.data.pattern + self._clean(line)
def matrix(self, line):
self.data.matrix.append(self._clean(line))
def postprocessing(self, line):
postprocessing = self._clean(line).split(";")
self.data.postprocessing.extend(postprocessing)
def rule(self, line):
self.data.rules.append(self._clean(line))
def numerical_results(self, line):
cols = self._clean(line).split(";")
for col in cols:
if not col:
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/RELEASE':
release, seqs = data.split(",")
self.data.nr_sp_release = release
self.data.nr_sp_seqs = int(seqs)
elif qual == '/FALSE_NEG':
self.data.nr_false_neg = int(data)
elif qual == '/PARTIAL':
self.data.nr_partial = int(data)
elif qual in ['/TOTAL', '/POSITIVE', '/UNKNOWN', '/FALSE_POS']:
m = re.match(r'(\d+)\((\d+)\)', data)
if not m:
raise ValueError, "Broken data %s in comment line\n%s" % \
(repr(data), line)
hits = tuple(map(int, m.groups()))
if(qual == "/TOTAL"):
self.data.nr_total = hits
elif(qual == "/POSITIVE"):
self.data.nr_positive = hits
elif(qual == "/UNKNOWN"):
self.data.nr_unknown = hits
elif(qual == "/FALSE_POS"):
self.data.nr_false_pos = hits
else:
raise ValueError, "Unknown qual %s in comment line\n%s" % \
(repr(qual), line)
def comment(self, line):
#Expect CC lines like this:
#CC /TAXO-RANGE=??EPV; /MAX-REPEAT=2;
#Can (normally) split on ";" and then on "="
cols = self._clean(line).split(";")
for col in cols:
if not col or col[:17] == 'Automatic scaling':
# DNAJ_2 in Release 15 has a non-standard comment line:
# CC Automatic scaling using reversed database
# Throw it away. (Should I keep it?)
continue
if col.count("=") == 0 :
#Missing qualifier! Can we recover gracefully?
#For example, from Bug 2403, in PS50293 have:
#CC /AUTHOR=K_Hofmann; N_Hulo
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/TAXO-RANGE':
self.data.cc_taxo_range = data
elif qual == '/MAX-REPEAT':
self.data.cc_max_repeat = data
elif qual == '/SITE':
pos, desc = data.split(",")
self.data.cc_site.append((int(pos), desc))
elif qual == '/SKIP-FLAG':
self.data.cc_skip_flag = data
elif qual == '/MATRIX_TYPE':
self.data.cc_matrix_type = data
elif qual == '/SCALING_DB':
self.data.cc_scaling_db = data
elif qual == '/AUTHOR':
self.data.cc_author = data
elif qual == '/FT_KEY':
self.data.cc_ft_key = data
elif qual == '/FT_DESC':
self.data.cc_ft_desc = data
elif qual == '/VERSION':
self.data.cc_version = data
else:
raise ValueError, "Unknown qual %s in comment line\n%s" % \
(repr(qual), line)
def database_reference(self, line):
refs = self._clean(line).split(";")
for ref in refs:
if not ref:
continue
acc, name, type = [word.strip() for word in ref.split(",")]
if type == 'T':
self.data.dr_positive.append((acc, name))
elif type == 'F':
self.data.dr_false_pos.append((acc, name))
elif type == 'N':
self.data.dr_false_neg.append((acc, name))
elif type == 'P':
self.data.dr_potential.append((acc, name))
elif type == '?':
self.data.dr_unknown.append((acc, name))
else:
raise ValueError, "I don't understand type flag %s" % type
def pdb_reference(self, line):
cols = line.split()
for id in cols[1:]: # get all but the '3D' col
self.data.pdb_structs.append(self._chomp(id))
def prorule(self, line):
#Assume that each PR line can contain multiple ";" separated rules
rules = self._clean(line).split(";")
self.data.prorules.extend(rules)
def documentation(self, line):
self.data.pdoc = self._chomp(self._clean(line))
def terminator(self, line):
self.finished = True
def _chomp(self, word, to_chomp='.,;'):
# Remove the punctuation at the end of a word.
if word[-1] in to_chomp:
return word[:-1]
return word
def _clean(self, line, rstrip=1):
# Clean up a line.
if rstrip:
return line[5:].rstrip()
return line[5:]
def scan_sequence_expasy(seq=None, id=None, exclude_frequent=None):
"""scan_sequence_expasy(seq=None, id=None, exclude_frequent=None) ->
list of PatternHit's
Search a sequence for occurrences of Prosite patterns. You can
specify either a sequence in seq or a SwissProt/trEMBL ID or accession
in id. Only one of those should be given. If exclude_frequent
is true, then the patterns with the high probability of occurring
will be excluded.
"""
from Bio import ExPASy
if (seq and id) or not (seq or id):
raise ValueError, "Please specify either a sequence or an id"
handle = ExPASy.scanprosite1(seq, id, exclude_frequent)
return _extract_pattern_hits(handle)
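# A minimal usage sketch (the accession is hypothetical; give exactly one of
# seq or id):
# for hit in scan_sequence_expasy(id="P00750"):
#     print hit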
def _extract_pattern_hits(handle):
"""_extract_pattern_hits(handle) -> list of PatternHit's
Extract hits from a web page. Raises a ValueError if there
was an error in the query.
"""
class parser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.hits = []
self.broken_message = 'Some error occurred'
self._in_pre = 0
self._current_hit = None
self._last_found = None # Save state of parsing
def handle_data(self, data):
if data.find('try again') >= 0:
self.broken_message = data
return
elif data == 'illegal':
self.broken_message = 'Sequence contains illegal characters'
return
if not self._in_pre:
return
elif not data.strip():
return
if self._last_found is None and data[:4] == 'PDOC':
self._current_hit.pdoc = data
self._last_found = 'pdoc'
elif self._last_found == 'pdoc':
if data[:2] != 'PS':
raise ValueError, "Expected accession but got:\n%s" % data
self._current_hit.accession = data
self._last_found = 'accession'
elif self._last_found == 'accession':
self._current_hit.name = data
self._last_found = 'name'
elif self._last_found == 'name':
self._current_hit.description = data
self._last_found = 'description'
elif self._last_found == 'description':
m = re.findall(r'(\d+)-(\d+) (\w+)', data)
for start, end, seq in m:
self._current_hit.matches.append(
(int(start), int(end), seq))
def do_hr(self, attrs):
# <HR> inside a <PRE> section means a new hit.
if self._in_pre:
self._current_hit = PatternHit()
self.hits.append(self._current_hit)
self._last_found = None
def start_pre(self, attrs):
self._in_pre = 1
self.broken_message = None # Probably not broken
def end_pre(self):
self._in_pre = 0
p = parser()
p.feed(handle.read())
if p.broken_message:
raise ValueError, p.broken_message
return p.hits
def index_file(filename, indexname, rec2key=None):
"""index_file(filename, indexname, rec2key=None)
Index a Prosite file. filename is the name of the file.
indexname is the name of the dictionary. rec2key is an
optional callback that takes a Record and generates a unique key
(e.g. the accession number) for the record. If not specified,
the id name will be used.
"""
if not os.path.exists(filename):
raise ValueError, "%s does not exist" % filename
index = Index.Index(indexname, truncate=1)
index[Dictionary._Dictionary__filename_key] = filename
iter = Iterator(open(filename), parser=RecordParser())
while 1:
start = iter._uhandle.tell()
rec = iter.next()
length = iter._uhandle.tell() - start
if rec is None:
break
if rec2key is not None:
key = rec2key(rec)
else:
key = rec.name
if not key:
raise KeyError, "empty key was produced"
elif index.has_key(key):
raise KeyError, "duplicate key %s found" % key
index[key] = start, length
def _extract_record(handle):
"""_extract_record(handle) -> str
Extract PROSITE data from a web page. Raises a ValueError if no
data was found in the web page.
"""
# All the data appears between tags:
# <pre width = 80>ID NIR_SIR; PATTERN.
# </PRE>
class parser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self._in_pre = 0
self.data = []
def handle_data(self, data):
if self._in_pre:
self.data.append(data)
def do_br(self, attrs):
if self._in_pre:
self.data.append('\n')
def start_pre(self, attrs):
self._in_pre = 1
def end_pre(self):
self._in_pre = 0
p = parser()
p.feed(handle.read())
if not p.data:
raise ValueError, "No data found in web page."
return "".join(p.data)
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/Prosite/__init__.py | Python | apache-2.0 | 31,020 | ["Biopython"] | 948b0d829410d1c676b27f4b0aba5463a81f2c97811dadd41cfa736bafa69df7 |
|
import collections
import pysam
import os
import gzip
CHR = 0
POS = 1
REF = 3
ALT = 4
FMT = 8
GT = 'GT'
REF_ALIAS = '.'
ALT_FS = ','
FS = '\t'
FMT_FS = ':'
VERBOSITY = 0
__all__ = ['parseFormat', 'getGenotype', 'parseGenotype',
'VCFIterator', 'VCFReader']
def parseFormat(fmt, data):
formatFields = fmt.split(FMT_FS)
dataFields = data.split(FMT_FS)
ret = dict()
for i in range(min(len(formatFields), len(dataFields))):
ret[formatFields[i]] = dataFields[i]
return ret
def getGenotype(genotypeStr):
if '/' in genotypeStr:
return genotypeStr.split('/')
if '|' in genotypeStr:
return genotypeStr.split('|')
return [genotypeStr]
def parseGenotype(ref, alt, genotype):
altFields = alt.split(ALT_FS)
if genotype == '0':
return ref
else:
return altFields[int(genotype) - 1]
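# Worked examples for the three helpers above:
# parseFormat("GT:DP", "0/1:33") -> {'GT': '0/1', 'DP': '33'}
# getGenotype("0|1")             -> ['0', '1']
# parseGenotype("A", "T,G", "2") -> 'G' (genotype N picks the Nth ALT allele)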
class VCFIterator(collections.Iterator):
def __init__(self, parent, fetched):
self.parent = parent
self.fetched = fetched
def __iter__(self):
return self
def next(self):
try:
while True:
line = self.fetched.next().rstrip()
fields = line.split(FS)
if len(fields) != self.parent.nColumns:
raise ValueError("Number of columns not consistent. (%s)" %
line)
genotypes = []
for i in self.parent.sampleIndexes:
fmtFields = parseFormat(fields[FMT], fields[i])
genotype = getGenotype(fmtFields[GT])
if len(set(genotype)) > 1:
if VERBOSITY > 1:
print("Hets found in %s:%s of sample %s (%s). " %
(fields[CHR], fields[POS],
self.parent.samples[i], fmtFields[GT]) +
"Use the first allele.")
genotype = genotype[0]
if genotype == REF_ALIAS:
genotype = '0'
genotypes.append(genotype)
# For only one sample, append a dummy ref genotype to compare against
if len(self.parent.sampleIndexes) == 1:
genotypes.append('0')
isVariant = False
for i in range(len(genotypes)-1):
if genotypes[i] != genotypes[i+1]:
isVariant = True
break
if not isVariant:
continue
# Remove the dummy ref genotype
if len(self.parent.sampleIndexes) == 1:
del genotypes[-1]
ret = []
ret.append(fields[CHR])
ret.append(int(fields[POS])-1) # convert to 0-based
ret.append(fields[REF])
ret.extend([parseGenotype(fields[REF], fields[ALT], genotype) for genotype in genotypes])
return ret
except StopIteration:
raise StopIteration()
class VCFReader():
def __init__(self, fileName, samples):
self.samples = samples
self.sampleIndexes = []
self.nColumns = 0
# Compress with bgzip
if not fileName.endswith('.gz'):
if not os.path.isfile(fileName+'.gz'):
pysam.tabix_compress(fileName, fileName+'.gz')
fileName += '.gz'
# Build tabix index
if not os.path.isfile(fileName+'.tbi'):
pysam.tabix_index(fileName, preset='vcf')
nLines = 0
fp = gzip.open(fileName, 'r')
line = fp.readline()
while line:
nLines += 1
if line.startswith('##'):
line = fp.readline()
elif line.startswith('#'): # Header line
break
else:
line = None # Content line, no header line found
else:
raise ValueError("Header not found.")
# Get the column index of selected samples
headers = line[1:].rstrip().split(FS)
self.nColumns = len(headers)
if self.nColumns <= 9:
raise ValueError("Not enough columns in header.")
for name in self.samples:
if name in headers[9:]:
self.sampleIndexes.append(headers.index(name))
else:
raise ValueError("Sample %s not found in header." % name)
self.tabix = pysam.Tabixfile(fileName)
self.chroms = self.tabix.contigs
self.fileName = fileName
def fetch(self, region):
return VCFIterator(self, self.tabix.fetch(region=region))
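# A minimal usage sketch (file and sample names are hypothetical):
# reader = VCFReader('variants.vcf', ['sample1'])
# for rec in reader.fetch('chr1'):
#     print rec  # [chrom, 0-based pos, ref, one parsed allele per sample]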
| andrewparkermorgan/lapels | modtools/vcfreader.py | Python | mit | 5,326 | ["pysam"] | 0c7525273168a4a107c06f2e5c2e25e4672007e78872a6c516d1510d83342b8a |
|
"""
Topological fingerprints for macromolecular structures.
"""
import numpy as np
import logging
import itertools
from deepchem.utils.hash_utils import hash_ecfp
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.rdkit_utils import load_complex
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_to_voxel
from deepchem.utils.rdkit_utils import compute_all_ecfp
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from typing import Tuple, Dict, List
logger = logging.getLogger(__name__)
def featurize_contacts_ecfp(
frag1: Tuple,
frag2: Tuple,
pairwise_distances: np.ndarray = None,
cutoff: float = 4.5,
ecfp_degree: int = 2) -> Tuple[Dict[int, str], Dict[int, str]]:
"""Computes ECFP dicts for pairwise interaction between two molecular fragments.
Parameters
----------
frag1: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
frag2: Tuple
A tuple of (coords, mol) returned by `load_molecule`.
pairwise_distances: np.ndarray
Array of pairwise fragment-fragment distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
ecfp_degree: int
ECFP radius
Returns
-------
Tuple of dictionaries of ECFP contact fragments
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
frag1_ecfp_dict = compute_all_ecfp(
frag1[1], indices=frag1_atoms, degree=ecfp_degree)
frag2_ecfp_dict = compute_all_ecfp(
frag2[1], indices=frag2_atoms, degree=ecfp_degree)
return (frag1_ecfp_dict, frag2_ecfp_dict)
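# A minimal usage sketch (frag1/frag2 are the (coords, mol) tuples described
# in the docstring above):
# frag1_ecfp, frag2_ecfp = featurize_contacts_ecfp(
#     frag1, frag2, cutoff=4.5, ecfp_degree=2)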
class ContactCircularFingerprint(ComplexFeaturizer):
"""Compute (Morgan) fingerprints near contact points of macromolecular complexes.
Given a macromolecular complex made up of multiple
constituent molecules, first compute the contact points where
atoms from different molecules come close to one another. For
atoms within "contact regions," compute radial "ECFP"
fragments which are sub-molecules centered at atoms in the
contact region.
For a macromolecular complex, returns a vector of shape
`(2*size,)`
"""
def __init__(self, cutoff: float = 4.5, radius: int = 2, size: int = 8):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
radius: int, optional (default 2)
Fingerprint radius.
size: int, optional (default 8)
Length of generated bit vector.
"""
self.cutoff = cutoff
self.radius = radius
self.size = size
def _featurize(self, mol_pdb: str, protein_pdb: str):
"""
Compute featurization for a molecular complex
Parameters
----------
mol_pdb: str
Filename for ligand molecule
protein_pdb: str
Filename for protein molecule
"""
try:
fragments = load_complex((mol_pdb, protein_pdb), add_hydrogens=False)
except MoleculeLoadException:
logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features = []
# We compute pairwise contact fingerprints
for (frag1, frag2) in itertools.combinations(fragments, 2):
      # Compute pairwise distances between the fragments' coordinates
distances = compute_pairwise_distances(frag1[0], frag2[0])
vector = [
vectorize(hash_ecfp, feature_dict=ecfp_dict, size=self.size)
for ecfp_dict in featurize_contacts_ecfp(
frag1,
frag2,
distances,
cutoff=self.cutoff,
ecfp_degree=self.radius)
]
pairwise_features += vector
pairwise_features = np.concatenate(pairwise_features)
return pairwise_features
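# A minimal usage sketch for the class above; the PDB file names are
# hypothetical placeholders:
#
#   featurizer = ContactCircularFingerprint(cutoff=4.5, radius=2, size=8)
#   vec = featurizer._featurize('ligand.pdb', 'protein.pdb')
#   # For a two-fragment complex, `vec` has shape (2 * size,) == (16,).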
class ContactCircularVoxelizer(ComplexFeaturizer):
"""Computes ECFP fingerprints on a voxel grid.
Given a macromolecular complex made up of multiple
constituent molecules, first compute the contact points where
atoms from different molecules come close to one another. For
atoms within "contact regions," compute radial "ECFP"
fragments which are sub-molecules centered at atoms in the
  contact region. Localize these ECFP fingerprints at the voxel
in which they originated.
Featurizes a macromolecular complex into a tensor of shape
`(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)` where
`voxels_per_edge = int(box_width/voxel_width)`. If `flatten==True`,
then returns a flattened version of this tensor of length
`size*voxels_per_edge**3`
"""
def __init__(self,
cutoff: float = 4.5,
radius: int = 2,
size: int = 8,
box_width: float = 16.0,
voxel_width: float = 1.0,
flatten: bool = False):
"""
Parameters
----------
cutoff: float (default 4.5)
Distance cutoff in angstroms for molecules in complex.
radius : int, optional (default 2)
Fingerprint radius.
size : int, optional (default 8)
Length of generated bit vector.
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
flatten: bool, optional (default False)
If True, then returns a flat feature vector rather than voxel grid. This
feature vector is constructed by flattening the usual voxel grid.
"""
self.cutoff = cutoff
self.radius = radius
self.size = size
self.box_width = box_width
self.voxel_width = voxel_width
self.voxels_per_edge = int(self.box_width / self.voxel_width)
self.flatten = flatten
def _featurize(self, mol_pdb: str, protein_pdb: str):
"""
Compute featurization for a molecular complex
Parameters
----------
mol_pdb: str
Filename for ligand molecule
protein_pdb: str
Filename for protein molecule
"""
molecular_complex = (mol_pdb, protein_pdb)
try:
fragments = load_complex(molecular_complex, add_hydrogens=False)
except MoleculeLoadException:
logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
return None
pairwise_features: List[np.ndarray] = []
# We compute pairwise contact fingerprints
centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
for (frag1, frag2) in itertools.combinations(fragments, 2):
distances = compute_pairwise_distances(frag1[0], frag2[0])
frag1_xyz = subtract_centroid(frag1[0], centroid)
frag2_xyz = subtract_centroid(frag2[0], centroid)
xyzs = [frag1_xyz, frag2_xyz]
pairwise_features.append(
sum([
voxelize(
convert_atom_to_voxel,
xyz,
self.box_width,
self.voxel_width,
hash_function=hash_ecfp,
feature_dict=ecfp_dict,
nb_channel=self.size) for xyz, ecfp_dict in zip(
xyzs,
featurize_contacts_ecfp(
frag1,
frag2,
distances,
cutoff=self.cutoff,
ecfp_degree=self.radius))
]))
if self.flatten:
return np.concatenate(
[features.flatten() for features in pairwise_features])
else:
# Features are of shape (voxels_per_edge, voxels_per_edge,
# voxels_per_edge, num_feat) so we should concatenate on the last
# axis.
return np.concatenate(pairwise_features, axis=-1)
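# With the defaults above, voxels_per_edge = int(16.0 / 1.0) = 16, so each
# fragment pair yields a grid of shape (16, 16, 16, 8); the flattened
# variant has length 8 * 16**3 = 32768.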
|
lilleswing/deepchem
|
deepchem/feat/complex_featurizers/contact_fingerprints.py
|
Python
|
mit
| 8,254
|
[
"RDKit"
] |
369779e06d7f9a9f0bfac589501d4a0210922174cf0ea0a1c61695044a048735
|
# -*- coding: utf-8 -*-
#
# clopath_synapse_small_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Bidirectional connections
-----------------------------------------
This script simulates a small network of ten excitatory and three
inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected
and driven by 500 Poisson generators. The synapses from the Poisson generators
to the excitatory population and those among the neurons of the network
are Clopath synapses. The rate of the Poisson generators is modulated with
a Gaussian profile whose center shifts randomly every 100 ms between ten
equally spaced positions.
This setup demonstrates that the Clopath synapse is able to establish
bidirectional connections. The example is adapted from [1]_ (cf. fig. 5).
References
~~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import nest
import numpy as np
import matplotlib.pyplot as pl
import random
##############################################################################
# Set the parameters
simulation_time = 1.0e4
resolution = 0.1
delay = resolution
# Poisson_generator parameters
pg_A = 30. # amplitude of Gaussian
pg_sigma = 10. # std deviation
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
# Create neurons and devices
nrn_model = 'aeif_psc_delta_clopath'
nrn_params = {'V_m': -30.6,
'g_L': 30.0,
'w': 0.0,
'tau_plus': 7.0,
'tau_minus': 10.0,
'tau_w': 144.0,
'a': 4.0,
'C_m': 281.0,
'Delta_T': 2.0,
'V_peak': 20.0,
't_clamp': 2.0,
'A_LTP': 8.0e-6,
'A_LTD': 14.0e-6,
'A_LTD_const': False,
'b': 0.0805,
'u_ref_squared': 60.0**2}
pop_exc = nest.Create(nrn_model, 10, nrn_params)
pop_inh = nest.Create(nrn_model, 3, nrn_params)
##############################################################################
# We need parrot neurons since Poisson generators can only be connected
# with static connections
pop_input = nest.Create('parrot_neuron', 500) # helper neurons
pg = nest.Create('poisson_generator', 500)
wr = nest.Create('weight_recorder', 1)
##############################################################################
# First connect Poisson generators to helper neurons
nest.Connect(pg, pop_input, 'one_to_one', {'model': 'static_synapse',
'weight': 1.0, 'delay': delay})
##############################################################################
# Create all the connections
nest.CopyModel('clopath_synapse', 'clopath_input_to_exc',
{'Wmax': 3.0})
conn_dict_input_to_exc = {'rule': 'all_to_all'}
syn_dict_input_to_exc = {'model': 'clopath_input_to_exc',
'weight': {'distribution': 'uniform', 'low': 0.5,
'high': 2.0},
'delay': delay}
nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc,
syn_dict_input_to_exc)
# Create input->inh connections
conn_dict_input_to_inh = {'rule': 'all_to_all'}
syn_dict_input_to_inh = {'model': 'static_synapse',
'weight': {'distribution': 'uniform', 'low': 0.0,
'high': 0.5},
'delay': delay}
nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh)
# Create exc->exc connections
nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc',
{'Wmax': 0.75, 'weight_recorder': wr[0]})
syn_dict_exc_to_exc = {'model': 'clopath_exc_to_exc', 'weight': 0.25,
'delay': delay}
conn_dict_exc_to_exc = {'rule': 'all_to_all', 'autapses': False}
nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc)
# Create exc->inh connections
syn_dict_exc_to_inh = {'model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8}
nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh)
# Create inh->exc connections
syn_dict_inh_to_exc = {'model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6}
nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc)
##############################################################################
# Randomize the initial membrane potential
for nrn in pop_exc:
nest.SetStatus([nrn, ], {'V_m': np.random.normal(-60.0, 25.0)})
for nrn in pop_inh:
nest.SetStatus([nrn, ], {'V_m': np.random.normal(-60.0, 25.0)})
##############################################################################
# Simulation divided into intervals of 100ms for shifting the Gaussian
for i in range(int(simulation_time/100.0)):
# set rates of poisson generators
rates = np.empty(500)
# pg_mu will be randomly chosen out of 25,75,125,...,425,475
pg_mu = 25 + random.randint(0, 9) * 50
for j in range(500):
rates[j] = pg_A * \
np.exp((-1 * (j - pg_mu) ** 2) / (2 * (pg_sigma) ** 2))
nest.SetStatus([pg[j]], {'rate': rates[j]*1.75})
nest.Simulate(100.0)
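##############################################################################
# At the profile's center (j == pg_mu) the rate set above evaluates to
# pg_A * 1.75 = 52.5 spikes/s, decaying with standard deviation
# pg_sigma = 10 generator indices to either side.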
##############################################################################
# Plot results
fig1, axA = pl.subplots(1, sharex=False)
# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(nest.GetStatus(exc_conns, 'source'))
exc_conns_targets = np.array(nest.GetStatus(exc_conns, 'target'))
exc_conns_weights = np.array(nest.GetStatus(exc_conns, 'weight'))
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10-1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10-1))
# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
idx_array = np.argsort(trgs)
weights[i] = ws[idx_array]
weight_matrix = np.zeros((10, 10))
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]
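# `weights` is 10x9 because autapses are excluded; the triangular index
# juggling above re-inserts an empty diagonal to obtain the full 10x10
# weight matrix.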
# Difference between initial and final value
init_w_matrix = np.ones((10, 10))*0.25
init_w_matrix -= np.identity(10)*0.25
caxA = axA.imshow(weight_matrix - init_w_matrix)
cbarB = fig1.colorbar(caxA, ax=axA)
axA.set_xticks([0, 2, 4, 6, 8])
axA.set_xticklabels(['1', '3', '5', '7', '9'])
axA.set_yticks([0, 2, 4, 6, 8])
axA.set_yticklabels(['1', '3', '5', '7', '9'])
axA.set_xlabel("to neuron")
axA.set_ylabel("from neuron")
axA.set_title("Change of syn weights before and after simulation")
pl.show()
|
hakonsbm/nest-simulator
|
pynest/examples/clopath_synapse_small_network.py
|
Python
|
gpl-2.0
| 7,727
|
[
"Gaussian",
"NEURON"
] |
b74858f23bc3b7603a4d4ad9e8c3a981b2dec93997a878e1d167ccbe1d9e806c
|
from jobman import DD, expand, flatten
import pynet.layer as layer
from pynet.model import *
from pynet.layer import *
from pynet.datasets.mnist import Mnist, Mnist_Blocks
import pynet.datasets.spec as spec
import pynet.datasets.mnist as mnist
import pynet.datasets.transfactor as tf
import pynet.datasets.mapping as mapping
import pynet.learning_method as learning_methods
from pynet.learning_rule import LearningRule
from pynet.log import Log
from pynet.train_object import TrainObject
from pynet.cost import Cost
import pynet.datasets.preprocessor as preproc
import pynet.datasets.dataset_noise as noisy
import pynet.layer_noise as layer_noise
import cPickle
import os
import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
floatX = theano.config.floatX
class AE:
def __init__(self, state):
self.state = state
def run(self):
log = self.build_log()
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
model = self.build_model(dataset)
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
model = model)
train_obj.run()
def build_log(self, save_to_database=None, id=None):
log = Log(experiment_name = id is not None and '%s_%s'%(self.state.log.experiment_name,id) \
or self.state.log.experiment_name,
description = self.state.log.description,
save_outputs = self.state.log.save_outputs,
save_learning_rule = self.state.log.save_learning_rule,
save_model = self.state.log.save_model,
save_epoch_error = self.state.log.save_epoch_error,
save_to_database = save_to_database)
return log
def build_dataset(self):
dataset = None
preprocessor = None if self.state.dataset.preprocessor.type is None else \
getattr(preproc, self.state.dataset.preprocessor.type)()
# if self.state.dataset.noise.type == 'BlackOut' or self.state.dataset.noise.type == 'MaskOut':
# noise = None if self.state.dataset.noise.type is None else \
# getattr(noisy, self.state.dataset.noise.type)(ratio=self.state.dataset.noise.ratio)
# else:
# noise = getattr(noisy, self.state.dataset.noise.type)()
noise = None if self.state.dataset.dataset_noise.type is None else \
getattr(noisy, self.state.dataset.dataset_noise.type)()
if self.state.dataset.preprocessor.type == 'Scale':
preprocessor.max = self.state.dataset.preprocessor.global_max
preprocessor.min = self.state.dataset.preprocessor.global_min
preprocessor.buffer = self.state.dataset.preprocessor.buffer
preprocessor.scale_range = self.state.dataset.preprocessor.scale_range
if self.state.dataset.type == 'Mnist':
dataset = Mnist(train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.X, train.X)
valid = dataset.get_valid()
dataset.set_valid(valid.X, valid.X)
test = dataset.get_test()
dataset.set_test(test.X, test.X)
elif self.state.dataset.type[:12] == 'Mnist_Blocks':
dataset = getattr(mnist, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:4] == 'P276':
dataset = getattr(spec, self.state.dataset.type)(
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.X, train.X)
valid = dataset.get_valid()
dataset.set_valid(valid.X, valid.X)
test = dataset.get_test()
dataset.set_test(test.X, test.X)
elif self.state.dataset.type[:5] == 'Laura':
dataset = getattr(spec, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:18] == 'TransFactor_Blocks':
dataset = getattr(tf, self.state.dataset.type)(
feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
elif self.state.dataset.type[:11] == 'TransFactor':
dataset = getattr(tf, self.state.dataset.type)(
# feature_size = self.state.dataset.feature_size,
# target_size = self.state.dataset.feature_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
noise = noise,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
train = dataset.get_train()
dataset.set_train(train.X, train.X)
valid = dataset.get_valid()
dataset.set_valid(valid.X, valid.X)
test = dataset.get_test()
dataset.set_test(test.X, test.X)
return dataset
def build_learning_method(self):
if self.state.learning_method.type == 'SGD':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
learning_rate = self.state.learning_method.learning_rate,
momentum = self.state.learning_method.momentum)
elif self.state.learning_method.type == 'AdaGrad':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
learning_rate = self.state.learning_method.learning_rate,
momentum = self.state.learning_method.momentum)
elif self.state.learning_method.type == 'AdaDelta':
learn_method = getattr(learning_methods,
self.state.learning_method.type)(
rho = self.state.learning_method.rho,
eps = self.state.learning_method.eps)
else:
raise TypeError("not SGD, AdaGrad or AdaDelta")
return learn_method
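    # A minimal sketch of the `state.learning_method` sub-tree that
    # build_learning_method() above expects; the field values here are
    # hypothetical:
    #
    #   from jobman import DD
    #   state.learning_method = DD(type='SGD',
    #                              learning_rate=0.01,
    #                              momentum=0.9)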
def build_learning_rule(self):
learning_rule = LearningRule(max_col_norm = self.state.learning_rule.max_col_norm,
L1_lambda = self.state.learning_rule.L1_lambda,
L2_lambda = self.state.learning_rule.L2_lambda,
training_cost = Cost(type = self.state.learning_rule.cost),
stopping_criteria = {'max_epoch' : self.state.learning_rule.stopping_criteria.max_epoch,
'epoch_look_back' : self.state.learning_rule.stopping_criteria.epoch_look_back,
'cost' : Cost(type=self.state.learning_rule.stopping_criteria.cost),
'percent_decrease' : self.state.learning_rule.stopping_criteria.percent_decrease})
return learning_rule
def build_one_hid_model(self, input_dim):
model = AutoEncoder(input_dim=input_dim, rand_seed=self.state.model.rand_seed)
h1_noise = None if self.state.hidden1.layer_noise.type is None else \
getattr(layer_noise, self.state.hidden1.layer_noise.type)()
if self.state.hidden1.layer_noise.type in ['BlackOut', 'MaskOut', 'BatchOut']:
h1_noise.ratio = self.state.hidden1.layer_noise.ratio
elif self.state.hidden1.layer_noise.type == 'Gaussian':
h1_noise.std = self.state.hidden1.layer_noise.std
h1_noise.mean = self.state.hidden1.layer_noise.mean
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below,
noise=h1_noise)
# blackout_below=self.state.hidden1.blackout_below)
model.add_encode_layer(hidden1)
h1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
W=hidden1.W.T,
dropout_below=self.state.h1_mirror.dropout_below)
# blackout_below=self.state.h1_mirror.blackout_below)
model.add_decode_layer(h1_mirror)
return model
def build_one_hid_model_no_transpose(self, input_dim):
model = AutoEncoder(input_dim = input_dim, rand_seed=self.state.model.rand_seed)
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below,
noise=None if self.state.hidden1.layer_noise is None else \
getattr(layer_noise, self.state.hidden1.layer_noise)())
# blackout_below=self.state.hidden1.blackout_below)
model.add_encode_layer(hidden1)
h1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
dropout_below=self.state.h1_mirror.dropout_below)
# blackout_below=self.state.h1_mirror.blackout_below)
model.add_decode_layer(h1_mirror)
return model
def build_two_hid_model(self, input_dim):
model = AutoEncoder(input_dim=input_dim, rand_seed=self.state.model.rand_seed)
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below,
noise=None if self.state.hidden1.layer_noise is None else \
getattr(layer_noise, self.state.hidden1.layer_noise)())
# blackout_below=self.state.hidden1.blackout_below)
model.add_encode_layer(hidden1)
hidden2 = getattr(layer, self.state.hidden2.type)(dim=self.state.hidden2.dim,
name=self.state.hidden2.name,
dropout_below=self.state.hidden2.dropout_below,
noise=None if self.state.hidden2.layer_noise is None else \
getattr(layer_noise, self.state.hidden2.layer_noise)())
# blackout_below=self.state.hidden2.blackout_below)
model.add_encode_layer(hidden2)
hidden2_mirror = getattr(layer, self.state.h2_mirror.type)(dim=self.state.hidden1.dim,
name=self.state.h2_mirror.name,
dropout_below=self.state.h2_mirror.dropout_below,
# blackout_below=self.state.h2_mirror.blackout_below,
W = hidden2.W.T)
model.add_decode_layer(hidden2_mirror)
hidden1_mirror = getattr(layer, self.state.h1_mirror.type)(dim=input_dim,
name=self.state.h1_mirror.name,
dropout_below=self.state.h1_mirror.dropout_below,
# blackout_below=self.state.h1_mirror.blackout_below,
W = hidden1.W.T)
model.add_decode_layer(hidden1_mirror)
return model
def build_database(self, dataset, learning_rule, learning_method, model):
save_to_database = {'name' : self.state.log.save_to_database_name,
'records' : {'Dataset' : dataset.__class__.__name__,
'max_col_norm' : learning_rule.max_col_norm,
'Weight_Init_Seed' : model.rand_seed,
'Dropout_Below' : str([layer.dropout_below for layer in model.layers]),
'Learning_Method' : learning_method.__class__.__name__,
'Batch_Size' : dataset.batch_size,
'Dataset_Noise' : dataset.noise.__class__.__name__,
'Dataset_Dir' : dataset.data_dir,
'Feature_Size' : dataset.feature_size(),
'nblocks' : dataset.nblocks(),
'Layer_Types' : str([layer.__class__.__name__ for layer in model.layers]),
'Layer_Dim' : str([layer.dim for layer in model.layers]),
'Preprocessor' : dataset.preprocessor.__class__.__name__,
'Training_Cost' : learning_rule.cost.type,
'Stopping_Cost' : learning_rule.stopping_criteria['cost'].type}
}
if learning_method.__class__.__name__ == "SGD":
save_to_database["records"]["Learning_rate"] = learning_method.learning_rate
save_to_database["records"]["Momentum"] = learning_method.momentum
elif learning_method.__class__.__name__ == "AdaGrad":
save_to_database["records"]["Learning_rate"] = learning_method.learning_rate
save_to_database["records"]["Momentum"] = learning_method.momentum
elif learning_method.__class__.__name__ == "AdaDelta":
save_to_database["records"]["rho"] = learning_method.rho
save_to_database["records"]["eps"] = learning_method.eps
else:
raise TypeError("not SGD, AdaGrad or AdaDelta")
layer_noise = []
layer_noise_params = []
for layer in model.layers:
layer_noise.append(layer.noise.__class__.__name__)
if layer.noise.__class__.__name__ in ['BlackOut', 'MaskOut', 'BatchOut']:
layer_noise_params.append(layer.noise.ratio)
            elif layer.noise.__class__.__name__ == 'Gaussian':
layer_noise_params.append((layer.noise.mean, layer.noise.std))
else:
layer_noise_params.append(None)
save_to_database["records"]["Layer_Noise"] = str(layer_noise)
save_to_database["records"]["Layer_Noise_Params"] = str(layer_noise_params)
return save_to_database
class AE_Testing(AE):
def __init__(self, state):
self.state = state
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_one_hid_model(dataset.feature_size())
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
log = self.build_log(database)
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
# fine tuning
log.info("fine tuning")
train_obj.model.layers[0].dropout_below = None
train_obj.setup()
train_obj.run()
class Laura_Mapping(AE):
def __init__(self, state):
self.state = state
def run(self):
preprocessor = None if self.state.dataset.preprocessor is None else \
getattr(preproc, self.state.dataset.preprocessor)()
dataset = getattr(mapping, self.state.dataset.type)(feature_size = self.state.dataset.feature_size,
target_size = self.state.dataset.target_size,
train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
preprocessor = preprocessor,
batch_size = self.state.dataset.batch_size,
num_batches = self.state.dataset.num_batches,
iter_class = self.state.dataset.iter_class,
rng = self.state.dataset.rng)
model = MLP(input_dim = self.state.dataset.feature_size, rand_seed=self.state.model.rand_seed)
hidden1 = getattr(layer, self.state.hidden1.type)(dim=self.state.hidden1.dim,
name=self.state.hidden1.name,
dropout_below=self.state.hidden1.dropout_below)
model.add_layer(hidden1)
hidden2 = getattr(layer, self.state.hidden2.type)(dim=self.state.hidden2.dim,
name=self.state.hidden2.name,
dropout_below=self.state.hidden2.dropout_below)
model.add_layer(hidden2)
output = getattr(layer, self.state.output.type)(dim=self.state.output.dim,
name=self.state.output.name,
dropout_below=self.state.output.dropout_below)
model.add_layer(output)
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
database = self.build_database(dataset, learning_rule, learn_method, model)
log = self.build_log(database)
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
class Laura(AE):
def __init__(self, state):
self.state = state
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
# import pdb
# pdb.set_trace()
if self.state.num_layers == 1:
model = self.build_one_hid_model(dataset.feature_size())
elif self.state.num_layers == 2:
model = self.build_two_hid_model(dataset.feature_size())
else:
            raise ValueError("num_layers must be 1 or 2")
database = self.build_database(dataset, learning_rule, learn_method, model)
log = self.build_log(database)
dataset.log = log
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
log.info("Fine Tuning")
for layer in train_obj.model.layers:
layer.dropout_below = None
layer.noise = None
train_obj.setup()
train_obj.run()
class Laura_Continue(AE):
def __init__(self, state):
self.state = state
def build_model(self):
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden1.model + '/model.pkl') as f1:
model = cPickle.load(f1)
return model
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_model()
if self.state.fine_tuning_only:
for layer in model.layers:
layer.dropout_below = None
layer.noise = None
print "Fine Tuning Only"
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
database['records']['model'] = self.state.hidden1.model
log = self.build_log(database)
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
if not self.state.fine_tuning_only:
log.info("..Fine Tuning after Noisy Training")
for layer in train_obj.model.layers:
layer.dropout_below = None
layer.noise = None
train_obj.setup()
train_obj.run()
class Laura_Two_Layers(AE):
def __init__(self, state):
self.state = state
def build_model(self, input_dim):
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden1.model + '/model.pkl') as f1:
model1 = cPickle.load(f1)
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden2.model + '/model.pkl') as f2:
model2 = cPickle.load(f2)
model = AutoEncoder(input_dim=input_dim)
while len(model1.encode_layers) > 0:
model.add_encode_layer(model1.pop_encode_layer())
while len(model2.encode_layers) > 0:
model.add_encode_layer(model2.pop_encode_layer())
while len(model2.decode_layers) > 0:
model.add_decode_layer(model2.pop_decode_layer())
while len(model1.decode_layers) > 0:
model.add_decode_layer(model1.pop_decode_layer())
return model
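    # Layer order of the stacked model returned above:
    # [h1_encode, h2_encode, h2_decode, h1_decode], i.e. the second
    # pretrained autoencoder is nested inside the first.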
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_model(dataset.feature_size())
model.layers[0].dropout_below = self.state.hidden1.dropout_below
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
database['records']['h1_model'] = self.state.hidden1.model
database['records']['h2_model'] = self.state.hidden2.model
log = self.build_log(database)
log.info("Fine Tuning")
for layer in model.layers:
layer.dropout_below = None
layer.noise = None
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
class Laura_Three_Layers(AE):
def __init__(self, state):
self.state = state
def build_model(self, input_dim):
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden1.model + '/model.pkl') as f1:
model1 = cPickle.load(f1)
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden2.model + '/model.pkl') as f2:
model2 = cPickle.load(f2)
with open(os.environ['PYNET_SAVE_PATH'] + '/log/'
+ self.state.hidden3.model + '/model.pkl') as f3:
model3 = cPickle.load(f3)
model = AutoEncoder(input_dim=input_dim)
model.add_encode_layer(model1.pop_encode_layer())
model.add_encode_layer(model2.pop_encode_layer())
model.add_encode_layer(model3.pop_encode_layer())
model.add_decode_layer(model3.pop_decode_layer())
model.add_decode_layer(model2.pop_decode_layer())
model.add_decode_layer(model1.pop_decode_layer())
return model
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
model = self.build_model(dataset.feature_size())
model.layers[0].dropout_below = self.state.hidden1.dropout_below
model.layers[1].dropout_below = self.state.hidden2.dropout_below
model.layers[2].dropout_below = self.state.hidden3.dropout_below
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
database['records']['h1_model'] = self.state.hidden1.model
database['records']['h2_model'] = self.state.hidden2.model
database['records']['h3_model'] = self.state.hidden3.model
log = self.build_log(database)
log.info("Fine Tuning")
for layer in model.layers:
layer.dropout_below = None
layer.noise = None
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
class Laura_Two_Layers_No_Transpose(AE):
def __init__(self, state):
self.state = state
def run(self):
dataset = self.build_dataset()
learning_rule = self.build_learning_rule()
learn_method = self.build_learning_method()
if self.state.num_layers == 1:
model = self.build_one_hid_model_no_transpose(dataset.feature_size())
else:
            raise ValueError("num_layers must be 1")
if self.state.log.save_to_database_name:
database = self.build_database(dataset, learning_rule, learn_method, model)
log = self.build_log(database)
train_obj = TrainObject(log = log,
dataset = dataset,
learning_rule = learning_rule,
learning_method = learn_method,
model = model)
train_obj.run()
# fine tuning
log.info("fine tuning")
train_obj.model.layers[0].dropout_below = None
train_obj.setup()
train_obj.run()
|
hycis/Pynet
|
hps/deprecated/AE.py
|
Python
|
apache-2.0
| 30,172
|
[
"Gaussian"
] |
512d207470e97e5b750bcc1c5ffdbf9f4f2c3943d803ec683e86d53fbadc1101
|
import numpy as np
from numpy import linalg
from statsmodels.distributions.empirical_distribution import StepFunction
from scipy.stats import norm
from scipy.integrate import nquad
import cProfile, pstats, StringIO
from scipy.special import ndtri
from scipy.linalg import get_blas_funcs
class SampledPDF(StepFunction):
"""Like ECDF, but supports inverse and provides intervals around a point."""
def __init__(self, centers, densities, span_around):
self.centers = np.array(centers)
self.pp = np.cumsum(np.array(densities))
if self.pp[-1] != 1:
self.pp = self.pp / self.pp[-1]
super(SampledPDF, self).__init__(centers, self.pp, sorted=True)
self.span_around = span_around
def inverse(self, pp):
if len(np.array(pp).shape) == 0:
pp = np.array([pp])
        indexes = np.searchsorted(self.pp, pp) - 1
        below_support = indexes < 0
        # Copy before clamping, so the mask below still sees the original values.
        useiis = indexes.copy()
        useiis[below_support] = 0
        results = np.array(self.centers[useiis], dtype=float)
        results[below_support] = -np.inf
        return results
def cdf_around(self, xx):
"""Return the cummulative distribution in the span around this point."""
x01 = self.span_around(xx)
if x01[0] > self.centers[-1]:
return (self.pp[-2], 1)
if x01[1] < self.centers[0]:
return (0, self.pp[1])
return (self(x01[0]), self(x01[1]))
class ConstantPDF(SampledPDF):
def __init__(self, centers, span_around):
densities = np.ones(len(centers)) / len(centers)
super(ConstantPDF, self).__init__(centers, densities, span_around)
class GaussianCopula(object):
def __init__(self, dists, corrs, limit=2):
"""dists is a vector of SampledPDFs. corrs is an np.matrix of correlations."""
self.dists = dists
self.R = 2 * np.matrix(np.sin(corrs * np.pi / 6))
self.limit = limit
self.copula_eval = None
def __call__(self, *xxs, **options):
u0s = []
u1s = []
for ii in range(len(xxs)):
u01 = self.dists[ii].cdf_around(xxs[ii])
u0s.append(u01[0])
u1s.append(u01[1])
if np.any(np.array(u0s) == np.array(u1s)):
return 0
return self.integrate(u0s, u1s, **options)
#uus = [self.dists[ii](xxs[ii]) for ii in range(len(xxs))]
#pp = get_copula_function()(uus)
def get_copula_function(self):
if self.copula_eval is not None:
return self.copula_eval
#1/sqrt(det R) * exp(-.5 (phi-1(us))T . (R^-1 - I) . (phi-1(us)))
coef = 1.0 / np.sqrt(np.abs(linalg.det(self.R)))
variance = self.R.getI() - np.identity(self.R.shape[0])
blas_symv = get_blas_funcs("symv", [variance])
blas_dot = get_blas_funcs("dot", [variance])
def eval(*us):
invphis = ndtri(us) #Faster than norm.ppf(us)
#sandwich = (invphis * variance).dot(invphis)
sandwich = blas_dot(invphis, blas_symv(1, variance, invphis))
return coef * np.exp(-.5 * sandwich)
self.copula_eval = eval
return eval
def integrate(self, u0s, u1s, limit=None, permult=1):
"""u0s and u1s are vectors of start and end points for spans of the
Gaussian copula to integrate."""
if limit is None:
limit = self.limit
eval = self.get_copula_function()
if limit == 0:
u0s = np.array(u0s)
u1s = np.array(u1s)
return np.prod(permult * (u1s - u0s)) * eval(*((u0s + u1s) / 2))
return nquad(eval, zip(u0s, u1s), opts=[{'limit': limit}] * len(u0s))[0]
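# Note on GaussianCopula.integrate: with limit == 0 the integral over the box
# [u0s, u1s] is approximated by (box volume) * (copula density at the box
# midpoint); with limit > 0, scipy's nquad performs the full integration.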
if __name__ == '__main__':
span_around = lambda x: (x - 25, x + 25)
fx = SampledPDF([25, 75], [.5, .5], span_around)
print "Evluating distribution:", [fx(0), fx(50), fx(100)]
print "Degenerate case:", GaussianCopula([fx], np.identity(1))(25) # Degenerate case
gx = SampledPDF([25, 75], [.5, .5], span_around)
print "Checking on integration differences."
print norm.ppf([.999, 1])
eval = GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]])).get_copula_function()
print eval(.95, .95)
print eval(.999, .999)
print eval(1.0, 1.0)
print GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]]))(95, 95)
print GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]]))(99, 99)
print GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]]))(100, 100)
exit()
print "No correlation:", GaussianCopula([fx, gx], np.identity(2))(25, 25)
print "No correlation:", GaussianCopula([fx, gx], np.identity(2))(75, 25)
pr = cProfile.Profile()
pr.enable()
print "Full correlation:", GaussianCopula([fx, gx], np.matrix([[1, .999], [.999, 1]]))(25, 25)
print "Full correlation:", GaussianCopula([fx, gx], np.matrix([[1, .999], [.999, 1]]))(75, 25)
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
#ps.print_stats()
#print s.getvalue()
print "Some correlation:", GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]]))(25, 25)
print "Some correlation:", GaussianCopula([fx, gx], np.matrix([[1, .5], [.5, 1]]))(75, 25)
|
eicoffee/tools
|
lib/copula.py
|
Python
|
gpl-2.0
| 5,224
|
[
"Gaussian"
] |
2fd2ca916584acd2761e11781024e53341ab31e4e4aec197bfeb64989dbbb90b
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" A One-Time Password System based on RFC 2289
The class Authenticator contains the hashing-logic, and the parser for the
readable output. It also contains challenge which returns a string describing
the authentication scheme for a client.
OTP is a password container for a user on a server.
NOTE: Does not take care of transmitting the shared secret password.
At the end there's a dict called dict, a dictionary containing 2048
words for storing pronounceable 11-bit values. Taken from RFC 1760.
Uses the MD5- and SHA-algorithms for hashing
Todo: RFC2444, SASL (perhaps), parsing hex-responses
"""
import string
import random
def stringToLong(s):
""" Convert digest to long """
result = 0L
for byte in s:
result = (256 * result) + ord(byte)
return result
def stringToDWords(s):
""" Convert digest to a list of four 32-bits words """
result = []
for a in xrange(len(s) / 4):
tmp = 0L
for byte in s[-4:]:
tmp = (256 * tmp) + ord(byte)
result.append(tmp)
s = s[:-4]
return result
def longToString(l):
""" Convert long to digest """
result = ""
while l > 0L:
result = chr(l % 256) + result
l = l / 256L
return result
import md5, sha
hashid = {md5: 'md5', sha: 'sha1'}
INITIALSEQUENCE = 1000
MINIMUMSEQUENCE = 50
class Unauthorized(Exception):
"""the Unauthorized exception
This exception is raised when an action is not allowed, or a user is not
authenticated properly.
"""
class OTPAuthenticator:
"""A One Time Password System
    Based on RFC 2289, which is based on the S/KEY authentication scheme.
It uses the MD5- and SHA-algorithms for hashing
The variable OTP is at all times a 64bit string"""
    def __init__(self, hash = md5):
        "Set the hash to either md5 or sha1"
        self.hash = hash
def generateSeed(self):
"Return a 10 char random seed, with 6 lowercase chars and 4 digits"
seed = ''
        for x in range(6):
            seed = seed + chr(random.randrange(97, 123))  # 'a'..'z' inclusive
        for x in range(4):
            seed = seed + chr(random.randrange(48, 58))   # '0'..'9' inclusive
return seed
def foldDigest(self, otp):
if self.hash == md5:
return self.foldDigest128(otp)
if self.hash == sha:
return self.foldDigest160(otp)
def foldDigest128(self, otp128):
"Fold a 128 bit digest to 64 bit"
regs = stringToDWords(otp128)
p0 = regs[0] ^ regs[2]
p1 = regs[1] ^ regs[3]
S = ''
for a in xrange(4):
S = chr(p0 & 0xFF) + S
p0 = p0 >> 8
for a in xrange(4):
S = chr(p1 & 0xFF) + S
p1 = p1 >> 8
return S
def foldDigest160(self, otp160):
"Fold a 160 bit digest to 64 bit"
regs = stringToDWords(otp160)
p0 = regs[0] ^ regs[2]
p1 = regs[1] ^ regs[3]
        p0 = p0 ^ regs[4]
S = ''
for a in xrange(4):
S = chr(p0 & 0xFF) + S
p0 = p0 >> 8
for a in xrange(4):
S = chr(p1 & 0xFF) + S
p1 = p1 >> 8
return S
def hashUpdate(self, digest):
"Run through the hash and fold to 64 bit"
h = self.hash.new(digest)
return self.foldDigest(h.digest())
def generateOTP(self, seed, passwd, sequence):
"""Return a 64 bit OTP based on inputs
Run through makeReadable to get a 6 word pass-phrase"""
seed = string.lower(seed)
otp = self.hashUpdate(seed + passwd)
for a in xrange(sequence):
otp = self.hashUpdate(otp)
return otp
def calculateParity(self, otp):
"Calculate the parity from a 64bit OTP"
parity = 0
for i in xrange(0, 64, 2):
parity = parity + otp & 0x3
otp = otp >> 2
return parity
def makeReadable(self, otp):
"Returns a 6 word pass-phrase from a 64bit OTP"
digest = stringToLong(otp)
list = []
parity = self.calculateParity(digest)
for i in xrange(4,-1, -1):
list.append(dict[(digest >> (i * 11 + 9)) & 0x7FF])
list.append(dict[(digest << 2) & 0x7FC | (parity & 0x03)])
return string.join(list)
def challenge(self, seed, sequence):
"""Return a challenge in the format otp-<hash> <sequence> <seed>"""
return "otp-%s %i %s" % (hashid[self.hash], sequence, seed)
def parsePhrase(self, phrase):
"""Decode the phrase, and return a 64bit OTP
I will raise Unauthorized if the parity is wrong
TODO: Add support for hex (MUST) and the '2nd scheme'(SHOULD)"""
words = string.split(phrase)
for i in xrange(len(words)):
words[i] = string.upper(words[i])
b = 0L
for i in xrange(0,5):
b = b | ((long(dict.index(words[i])) << ((4-i)*11L+9L)))
tmp = dict.index(words[5])
b = b | (tmp & 0x7FC ) >> 2
if (tmp & 3) <> self.calculateParity(b):
raise Unauthorized("Parity error")
digest = longToString(b)
return digest
class OTP(OTPAuthenticator):
"""An automatic version of the OTP-Authenticator
    Updates the sequence and keeps the last approved password on success.
    On the next authentication, the stored password is hashed and checked
    against the one given by the user. If they match, the sequence counter
    is decreased and the circle is closed.
    This object should be glued to each user.
    Note:
    It does NOT reset the sequence when the combinations left approach zero;
    this has to be done manually by instantiating a new object.
"""
seed = None
sequence = 0
lastotp = None
def __init__(self, passwd, sequence = INITIALSEQUENCE, hash=md5):
"""Initialize the OTP-Sequence, and discard the password"""
OTPAuthenticator.__init__(self, hash)
seed = self.generateSeed()
# Generate the 'last' password
self.lastotp = OTPAuthenticator.generateOTP(self, seed, passwd, sequence+1)
self.seed = seed
self.sequence = sequence
def challenge(self):
"""Return a challenge string"""
result = OTPAuthenticator.challenge(self, self.seed, self.sequence)
return result
def authenticate(self, phrase):
"""Test the phrase against the last challenge issued"""
try:
digest = self.parsePhrase(phrase)
hasheddigest = self.hashUpdate(digest)
if (self.lastotp == hasheddigest):
self.lastotp = digest
if self.sequence > MINIMUMSEQUENCE:
self.sequence = self.sequence - 1
return "ok"
else:
raise Unauthorized("Failed")
except Unauthorized, msg:
raise Unauthorized(msg)
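# A minimal sketch of the chain verified above, assuming a shared password
# 'secret' (hypothetical). With seed S and sequence n the server stores
# H^(n+1)(S + passwd); the client answers the challenge with H^n(S + passwd),
# and authenticate() accepts iff one more hashUpdate() of the response equals
# the stored value, then slides the stored value down the chain.
#
#   otp = OTP('secret', sequence=500)
#   print otp.challenge()    # e.g. "otp-md5 500 abcdef1234"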
#
# The 2048 word standard dictionary from RFC 1760
#
dict = ["A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY",
"AN", "ANA", "AND", "ANN", "ANT", "ANY", "APE", "APS",
"APT", "ARC", "ARE", "ARK", "ARM", "ART", "AS", "ASH",
"ASK", "AT", "ATE", "AUG", "AUK", "AVE", "AWE", "AWK",
"AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG",
"BEN", "BET", "BEY", "BIB", "BID", "BIG", "BIN", "BIT",
"BOB", "BOG", "BON", "BOO", "BOP", "BOW", "BOY", "BUB",
"BUD", "BUG", "BUM", "BUN", "BUS", "BUT", "BUY", "BY",
"BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT",
"COW", "COY", "CRY", "CUB", "CUE", "CUP", "CUR", "CUT",
"DAB", "DAD", "DAM", "DAN", "DAR", "DAY", "DEE", "DEL",
"DEN", "DES", "DEW", "DID", "DIE", "DIG", "DIN", "DIP",
"DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL",
"EGG", "EGO", "ELI", "ELK", "ELM", "ELY", "EM", "END",
"EST", "ETC", "EVA", "EVE", "EWE", "EYE", "FAD", "FAN",
"FAR", "FAT", "FAY", "FED", "FEE", "FEW", "FIB", "FIG",
"FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL",
"GAM", "GAP", "GAS", "GAY", "GEE", "GEL", "GEM", "GET",
"GIG", "GIL", "GIN", "GO", "GOT", "GUM", "GUN", "GUS",
"GUT", "GUY", "GYM", "GYP", "HA", "HAD", "HAL", "HAM",
"HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP",
"HIS", "HIT", "HO", "HOB", "HOC", "HOE", "HOG", "HOP",
"HOT", "HOW", "HUB", "HUE", "HUG", "HUH", "HUM", "HUT",
"I", "ICY", "IDA", "IF", "IKE", "ILL", "INK", "INN",
"IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW",
"JAY", "JET", "JIG", "JIM", "JO", "JOB", "JOE", "JOG",
"JOT", "JOY", "JUG", "JUT", "KAY", "KEG", "KEN", "KEY",
"KID", "KIM", "KIN", "KIT", "LA", "LAB", "LAC", "LAD",
"LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN",
"LIP", "LIT", "LO", "LOB", "LOG", "LOP", "LOS", "LOT",
"LOU", "LOW", "LOY", "LUG", "LYE", "MA", "MAC", "MAD",
"MAE", "MAN", "MAO", "MAP", "MAT", "MAW", "MAY", "ME",
"MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW",
"MUD", "MUG", "MUM", "MY", "NAB", "NAG", "NAN", "NAP",
"NAT", "NAY", "NE", "NED", "NEE", "NET", "NEW", "NIB",
"NIL", "NIP", "NIT", "NO", "NOB", "NOD", "NON", "NOR",
"NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT",
"OH", "OIL", "OK", "OLD", "ON", "ONE", "OR", "ORB",
"ORE", "ORR", "OS", "OTT", "OUR", "OUT", "OVA", "OW",
"OWE", "OWL", "OWN", "OX", "PA", "PAD", "PAL", "PAM",
"PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE",
"PIN", "PIT", "PLY", "PO", "POD", "POE", "POP", "POT",
"POW", "PRO", "PRY", "PUB", "PUG", "PUN", "PUP", "PUT",
"QUO", "RAG", "RAM", "RAN", "RAP", "RAT", "RAW", "RAY",
"REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW",
"ROY", "RUB", "RUE", "RUG", "RUM", "RUN", "RYE", "SAC",
"SAD", "SAG", "SAL", "SAM", "SAN", "SAP", "SAT", "SAW",
"SAY", "SEA", "SEC", "SEE", "SEN", "SET", "SEW", "SHE",
"SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY",
"SPA", "SPY", "SUB", "SUD", "SUE", "SUM", "SUN", "SUP",
"TAB", "TAD", "TAG", "TAN", "TAP", "TAR", "TEA", "TED",
"TEE", "TEN", "THE", "THY", "TIC", "TIE", "TIM", "TIN",
"TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO",
"UN", "UP", "US", "USE", "VAN", "VAT", "VET", "VIE",
"WAD", "WAG", "WAR", "WAS", "WAY", "WE", "WEB", "WED",
"WEE", "WET", "WHO", "WHY", "WIN", "WIT", "WOK", "WON",
"WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE",
"ABUT", "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM",
"ADDS", "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA",
"AIDE", "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA",
"ALIA", "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA",
"AMEN", "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY",
"ANEW", "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH",
"AREA", "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS",
"ATOM", "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON",
"AVOW", "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE",
"BAIL", "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL",
"BALM", "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE",
"BARK", "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE",
"BATH", "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR",
"BEAT", "BEAU", "BECK", "BEEF", "BEEN", "BEER", "BEET", "BELA",
"BELL", "BELT", "BEND", "BENT", "BERG", "BERN", "BERT", "BESS",
"BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", "BIEN", "BILE",
"BILK", "BILL", "BIND", "BING", "BIRD", "BITE", "BITS", "BLAB",
"BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", "BLOW", "BLUE",
"BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", "BODE", "BODY",
"BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", "BOMB", "BONA",
"BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", "BOOM", "BOON",
"BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", "BOTH", "BOUT",
"BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", "BRAY", "BRED",
"BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", "BUFF", "BULB",
"BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", "BURL", "BURN",
"BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", "BUSY", "BYTE",
"CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", "CALL", "CALM",
"CAME", "CANE", "CANT", "CARD", "CARE", "CARL", "CARR", "CART",
"CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", "CELL", "CENT",
"CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", "CHEN", "CHEW",
"CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", "CHUM", "CITE",
"CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", "CLOD", "CLOG",
"CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", "COCK", "COCO",
"CODA", "CODE", "CODY", "COED", "COIL", "COIN", "COKE", "COLA",
"COLD", "COLT", "COMA", "COMB", "COME", "COOK", "COOL", "COON",
"COOT", "CORD", "CORE", "CORK", "CORN", "COST", "COVE", "COWL",
"CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", "CROW", "CRUD",
"CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", "CURB", "CURD",
"CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", "DAME", "DANA",
"DANE", "DANG", "DANK", "DARE", "DARK", "DARN", "DART", "DASH",
"DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", "DEAD", "DEAF",
"DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", "DEEM", "DEER",
"DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", "DIAL", "DICE",
"DIED", "DIET", "DIME", "DINE", "DING", "DINT", "DIRE", "DIRT",
"DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", "DOLE", "DOLL",
"DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", "DOSE", "DOTE",
"DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", "DRAM", "DRAW",
"DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", "DUCT", "DUEL",
"DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", "DUSK", "DUST",
"DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", "EASY", "EBEN",
"ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", "EDNA", "EGAN",
"ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", "EMMA", "ENDS",
"ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", "FACE", "FACT",
"FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", "FAME", "FANG",
"FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", "FEED", "FEEL",
"FEET", "FELL", "FELT", "FEND", "FERN", "FEST", "FEUD", "FIEF",
"FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", "FINK", "FIRE",
"FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", "FLAG", "FLAK",
"FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", "FLIT", "FLOC",
"FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", "FOGY", "FOIL",
"FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", "FOOT", "FORD",
"FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", "FOUR", "FOWL",
"FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", "FROG", "FROM",
"FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", "FUSE", "FUSS",
"GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", "GALE", "GALL",
"GALT", "GAME", "GANG", "GARB", "GARY", "GASH", "GATE", "GAUL",
"GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", "GENT", "GERM",
"GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", "GINA", "GIRD",
"GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", "GLIB", "GLOB",
"GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", "GOAL", "GOAT",
"GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", "GOOD", "GOOF",
"GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", "GRAD", "GRAY",
"GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", "GRIT", "GROW",
"GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", "GUST", "GWEN",
"GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", "HALE", "HALF",
"HALL", "HALO", "HALT", "HAND", "HANG", "HANK", "HANS", "HARD",
"HARK", "HARM", "HART", "HASH", "HAST", "HATE", "HATH", "HAUL",
"HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", "HEAT", "HEBE",
"HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", "HELM", "HERB",
"HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", "HICK", "HIDE",
"HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", "HIRE", "HISS",
"HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", "HOLM", "HOLT",
"HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", "HOOT", "HORN",
"HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", "HOYT", "HUCK",
"HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", "HULL", "HUNK",
"HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", "HYMN", "IBIS",
"ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", "INTO", "IONS",
"IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", "ITCH", "ITEM",
"IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", "JAVA", "JEAN",
"JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", "JILT", "JIVE",
"JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", "JOIN", "JOKE",
"JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", "JUJU", "JUKE",
"JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", "JUTE", "KAHN",
"KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", "KEEN", "KENO",
"KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", "KIND", "KING",
"KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", "KNIT", "KNOB",
"KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", "KURT", "KYLE",
"LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", "LAIR", "LAKE",
"LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", "LARK", "LASS",
"LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", "LAYS", "LEAD",
"LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", "LEFT", "LEND",
"LENS", "LENT", "LEON", "LESK", "LESS", "LEST", "LETS", "LIAR",
"LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", "LIFE", "LIFT",
"LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", "LIME", "LIND",
"LINE", "LINK", "LINT", "LION", "LISA", "LIST", "LIVE", "LOAD",
"LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", "LOIS", "LOLA",
"LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", "LORE", "LOSE",
"LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", "LUCY", "LUGE",
"LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", "LURK", "LUSH",
"LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", "MADE", "MAGI",
"MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", "MALL", "MALT",
"MANA", "MANN", "MANY", "MARC", "MARE", "MARK", "MARS", "MART",
"MARY", "MASH", "MASK", "MASS", "MAST", "MATE", "MATH", "MAUL",
"MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", "MEET", "MELD",
"MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", "MESS", "MICE",
"MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", "MIMI", "MIND",
"MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", "MIST", "MITE",
"MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", "MOLE", "MOLL",
"MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", "MOOR", "MOOT",
"MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", "MOVE", "MUCH",
"MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", "MUSH", "MUST",
"MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", "NAIR", "NAME",
"NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", "NEAT", "NECK",
"NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", "NEST", "NEWS",
"NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", "NINE", "NOAH",
"NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", "NORM", "NOSE",
"NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", "OATH", "OBEY",
"OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", "OLAF", "OLDY",
"OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", "ONES", "ONLY",
"ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", "OTTO", "OUCH",
"OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", "OWNS", "QUAD",
"QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", "RAGE", "RAID",
"RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", "RASH", "RATE",
"RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", "RECK", "REED",
"REEF", "REEK", "REEL", "REID", "REIN", "RENA", "REND", "RENT",
"REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", "RILL", "RIME",
"RING", "RINK", "RISE", "RISK", "RITE", "ROAD", "ROAM", "ROAR",
"ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", "ROOD", "ROOF",
"ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", "ROSY", "ROTH",
"ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", "RUDE", "RUDY",
"RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", "RUSH", "RUSK",
"RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", "SAID", "SAIL",
"SALE", "SALK", "SALT", "SAME", "SAND", "SANE", "SANG", "SANK",
"SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", "SCAT", "SCOT",
"SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", "SEEM", "SEEN",
"SEES", "SELF", "SELL", "SEND", "SENT", "SETS", "SEWN", "SHAG",
"SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", "SHOD", "SHOE",
"SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", "SIFT", "SIGH",
"SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", "SING", "SINK",
"SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", "SKID", "SKIM",
"SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", "SLED", "SLEW",
"SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", "SLOW", "SLUG",
"SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", "SNOW", "SNUB",
"SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", "SOFT", "SOIL",
"SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", "SORT", "SOUL",
"SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", "STAY", "STEM",
"STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", "SUDS", "SUIT",
"SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", "SWAB", "SWAG",
"SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", "TACK", "TACT",
"TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", "TASK", "TATE",
"TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", "TEEN", "TEET",
"TELL", "TEND", "TENT", "TERM", "TERN", "TESS", "TEST", "THAN",
"THAT", "THEE", "THEM", "THEN", "THEY", "THIN", "THIS", "THUD",
"THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", "TILE", "TILL",
"TILT", "TIME", "TINA", "TINE", "TINT", "TINY", "TIRE", "TOAD",
"TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", "TONY", "TOOK",
"TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", "TOUT", "TOWN",
"TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", "TRIM", "TRIO",
"TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", "TUCK", "TUFT",
"TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", "TWIG", "TWIN",
"TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", "USES", "UTAH",
"VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", "VEAL", "VEDA",
"VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", "VETO", "VICE",
"VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", "WACK", "WADE",
"WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", "WALL", "WALT",
"WAND", "WANE", "WANG", "WANT", "WARD", "WARM", "WARN", "WART",
"WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", "WAYS", "WEAK",
"WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", "WELD", "WELL",
"WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", "WHAT", "WHEE",
"WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", "WILD", "WILL",
"WIND", "WINE", "WING", "WINK", "WINO", "WIRE", "WISE", "WISH",
"WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", "WORE", "WORK",
"WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", "YANG", "YANK",
"YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", "YELL", "YOGA",
"YOKE"]
|
fxia22/ASM_xf
|
PythonD/site_python/twisted/python/otp.py
|
Python
|
gpl-2.0
| 25,849
|
[
"Elk",
"MOE"
] |
7a690b87a2745f58c81190b8f4188817a28d205e7136153114fee8ca371c15c8
|
""" Replicating the results of
Net on-chip Brillouin gain based on suspended
silicon nanowires
Van Laer et al.
http://dx.doi.org/10.1088/1367-2630/17/11/115005
"""
import time
import datetime
import numpy as np
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import copy
sys.path.append("../backend/")
import materials
import objects
import mode_calcs
import integration
import plotting
from fortran import NumBAT
start = time.time()
# Geometric Parameters - all in nm.
wl_nm = 1550
unitcell_x = 5*wl_nm
unitcell_y = 0.5*unitcell_x
inc_a_x = 450
inc_a_y = 230
inc_shape = 'rectangular'
num_modes_EM_pump = 20
num_modes_EM_Stokes = num_modes_EM_pump
num_modes_AC = 60
EM_ival_pump = 0
EM_ival_Stokes = 0
AC_ival = 'All'
prefix_str = 'lit_05-'
# Rotate crystal axis of Si from <100> to <110>, starting with same Si_2016_Smith data.
Si_110 = copy.deepcopy(materials.materials_dict["Si_2016_Smith"])
Si_110.rotate_axis(np.pi/4,'y-axis', save_rotated_tensors=True)
# Use all specified parameters to create a waveguide object.
wguide = objects.Struct(unitcell_x,inc_a_x,unitcell_y,inc_a_y,inc_shape,
material_bkg=materials.materials_dict["Vacuum"],
material_a=Si_110, symmetry_flag=False,
lc_bkg=1, lc_refine_1=1200.0, lc_refine_2=800.0)
# Expected effective index of fundamental guided mode.
n_eff = wguide.material_a.n-0.1
# Calculate Electromagnetic Modes
sim_EM_pump = wguide.calc_EM_modes(num_modes_EM_pump, wl_nm, n_eff=n_eff)
# np.savez('wguide_data', sim_EM_pump=sim_EM_pump)
# npzfile = np.load('wguide_data.npz')
# sim_EM_pump = npzfile['sim_EM_pump'].tolist()
sim_EM_Stokes = mode_calcs.fwd_Stokes_modes(sim_EM_pump)
# np.savez('wguide_data2', sim_EM_Stokes=sim_EM_Stokes)
# npzfile = np.load('wguide_data2.npz')
# sim_EM_Stokes = npzfile['sim_EM_Stokes'].tolist()
plotting.plt_mode_fields(sim_EM_pump, xlim_min=0.45, xlim_max=0.45,
ivals=[EM_ival_pump], ylim_min=0.45, ylim_max=0.45,
EM_AC='EM_E', n_points=1500,
prefix_str=prefix_str, pdf_png='png')
# Print the wavevectors of EM modes.
print('k_z of EM modes \n', np.round(np.real(sim_EM_pump.Eig_values), 4))
# Calculate the EM effective index of the waveguide.
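# (n_eff = Re(k_z) * lambda / (2*pi); the factor wl_nm*1e-9 converts the wavelength to metres.)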
n_eff_sim = np.real(sim_EM_pump.Eig_values*((wl_nm*1e-9)/(2.*np.pi)))
print("n_eff = ", np.round(n_eff_sim, 4))
k_AC = 5 # close but not quite zero
# Calculate Acoustic Modes
sim_AC = wguide.calc_AC_modes(num_modes_AC, k_AC, EM_sim=sim_EM_pump)
# np.savez('wguide_data_AC', sim_AC=sim_AC)
# npzfile = np.load('wguide_data_AC.npz')
# sim_AC = npzfile['sim_AC'].tolist()
# Print the frequencies of AC modes.
print('Freq of AC modes (GHz) \n', np.round(np.real(sim_AC.Eig_values)*1e-9, 4))
plotting.plt_mode_fields(sim_AC, EM_AC='AC', prefix_str=prefix_str, pdf_png='png')
set_q_factor = 230 # NJP
# Calculate interaction integrals and SBS gain for PE and MB effects combined,
# as well as just for PE, and just for MB.
SBS_gain, SBS_gain_PE, SBS_gain_MB, linewidth_Hz, Q_factors, alpha = integration.gain_and_qs(
sim_EM_pump, sim_EM_Stokes, sim_AC, k_AC,
EM_ival_pump=EM_ival_pump, EM_ival_Stokes=EM_ival_Stokes, AC_ival=AC_ival, fixed_Q=set_q_factor)
# Mask negligible gain values to improve clarity of print out.
threshold = 1e-3
masked_PE = np.ma.masked_inside(SBS_gain_PE[EM_ival_pump,EM_ival_Stokes,:], 0, threshold)
masked_MB = np.ma.masked_inside(SBS_gain_MB[EM_ival_pump,EM_ival_Stokes,:], 0, threshold)
masked = np.ma.masked_inside(SBS_gain[EM_ival_pump,EM_ival_Stokes,:], 0, threshold)
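# (np.ma.masked_inside hides entries in the closed interval [0, threshold];
#  values outside it, including any negative entries, remain visible.)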
print("\n Displaying results with negligible components masked out")
print("SBS_gain [1/(Wm)] PE contribution \n", masked_PE)
print("SBS_gain [1/(Wm)] MB contribution \n", masked_MB)
print("SBS_gain [1/(Wm)] total \n", masked)
# Construct the SBS gain spectrum, built from Lorentzian peaks of the individual modes.
freq_min = 9.1 # GHz
freq_max = 9.4 # GHz
plotting.gain_spectra(sim_AC, SBS_gain, SBS_gain_PE, SBS_gain_MB, linewidth_Hz, k_AC,
EM_ival_pump, EM_ival_Stokes, AC_ival, freq_min=freq_min, freq_max=freq_max,
prefix_str=prefix_str, suffix_str='', pdf_png='png')
end = time.time()
print("\n Simulation time (sec.)", (end - start))
|
bjornsturmberg/NumBAT
|
lit_examples/simo-lit_05-Van_Laer-NJP_2015.py
|
Python
|
gpl-3.0
| 4,372
|
[
"CRYSTAL"
] |
cc33ebe9ff7a498ac282be160a35024f20e5ae103fc31701886546f424c8073d
|
from functools import lru_cache
magic = 1362
# test data
# magic = 10
# goal_x = 7
# goal_y = 4
# a
goal_x = 31
goal_y = 39
# b
max_steps = 51
visited = {}
@lru_cache(None)
def is_wall(x, y):
factor = magic + x * x + 3 * x + 2 * x * y + y + y * y
bits = 0
while True:
bits += factor & 1
factor >>= 1
if not factor:
break
return bits % 2
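# Hedged aside (added for illustration; not part of the original solution): the
# loop above is a manual popcount, so is_wall() is equivalent to taking the
# parity of bin(...).count('1') over the same polynomial.
def _is_wall_reference(x, y):
    return bin(magic + x * x + 3 * x + 2 * x * y + y + y * y).count('1') % 2
assert _is_wall_reference(7, 4) == is_wall(7, 4)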
def is_visited(x, y):
return y in visited and x in visited[y]
def visit(x, y):
if y not in visited:
visited[y] = {}
visited[y][x] = True
def cnt():
c = 0
for y in visited:
c += len(visited[y])
return c
def draw():
for y in range(0, 7):
s = ''
for x in range(0, 10):
s += '# ' if is_wall(x, y) else '. '
print(s)
queue = [(1, 1, 0)]
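# Plain breadth-first search: pop(0) treats the list as a FIFO queue, so cells
# are expanded in nondecreasing step order and the first time the goal is
# popped, `steps` is the length of a shortest path.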
while queue:
x, y, steps = queue.pop(0)
if max_steps and steps == max_steps:
print("We could have visited " + str(cnt()) + " locations before we got here")
break
visit(x, y)
if x == goal_x and y == goal_y:
print("We did it! We're like .. the best: " + str(steps))
break
if not is_wall(x+1, y) and not is_visited(x+1, y):
queue.append((x+1, y, steps+1))
if not is_wall(x, y+1) and not is_visited(x, y+1):
queue.append((x, y+1, steps+1))
if x > 0 and not is_wall(x-1, y) and not is_visited(x-1, y):
queue.append((x-1, y, steps+1))
if y > 0 and not is_wall(x, y-1) and not is_visited(x, y-1):
queue.append((x, y-1, steps+1))
|
matslindh/codingchallenges
|
adventofcode2016/13.py
|
Python
|
mit
| 1,553
|
[
"VisIt"
] |
590e83c6d985286aee576bb7efae6c4546d28005194b4980531dadae37714eed
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog utilities
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import intListToString
def getIDSelectString( ids ):
"""
:param ids: input IDs - can be single int, list or tuple or a SELECT string
:return: Select string
"""
if isinstance( ids, basestring ) and ids.lower().startswith( 'select' ):
idString = ids
elif isinstance( ids, ( int, long ) ):
idString = '%d' % ids
elif isinstance( ids, ( tuple, list) ):
idString = intListToString( ids )
else:
return S_ERROR( 'Illegal fileID' )
return S_OK( idString )
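# Hedged usage sketch (added for illustration; not part of the original module).
# getIDSelectString() accepts a single ID, a list/tuple of IDs, or a ready-made
# SELECT string; anything else yields S_ERROR. Python 2 is assumed, matching
# the basestring/long checks above.
if __name__ == '__main__':
  for ids in ( 42, ( 1, 2, 3 ), 'SELECT FileID FROM FC_Files', None ):
    print getIDSelectString( ids )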
|
andresailer/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/Utilities.py
|
Python
|
gpl-3.0
| 764
|
[
"DIRAC"
] |
227e1496a189b9e4b4c8ff2a62242f635acd5533f7629e667b37f54f78c2f9c0
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import re
from mooseutils.yaml_load import yaml_load
import MooseDocs
from ..common import exceptions
from ..tree import tokens
from . import core, command
def make_extension(**kwargs):
return PackageExtension(**kwargs)
class PackageExtension(command.CommandExtension):
"""
    Adds the ability to link to MOOSE environment packages.
"""
@staticmethod
def defaultConfig():
packages_config = yaml_load(os.path.join(MooseDocs.MOOSE_DIR,
'framework',
'doc',
'packages_config.yml'))
config = command.CommandExtension.defaultConfig()
# Assign a key/value for every item in packages_config.yml
for k, v in packages_config.items():
if k != 'moose_packages':
config[k] = (v, 'Default version for %s' % (k))
else:
config[k] = (v, 'MOOSE Environment installer package')
config['link'] = (r'http://www.mooseframework.org/moose_packages',
"Location of packages.")
return config
def extend(self, reader, renderer):
self.requires(core, command)
self.addCommand(reader, PackageCommand())
self.addCommand(reader, PackageCodeReplace())
self.addCommand(reader, PackageTextReplace())
class PackageCommand(command.CommandComponent):
"""
Replace arch with matching moose-environment package, as specified in
the yaml configuration file.
YAML Syntax:
moose_packages:
centos: moose-environment-1_centos.rpm
Markdown Syntax:
[!package!name arch=centos]
"""
COMMAND = 'package'
SUBCOMMAND = 'name'
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
settings['arch'] = (None, "The name of the OS package name to retrieve.")
return settings
def createToken(self, parent, info, page):
arch = self.settings['arch']
packages = self.extension.get('moose_packages', dict())
if arch not in packages:
msg = "The supplied value for the 'arch' settings, {}, was not found."
raise exceptions.MooseDocsException(msg, arch)
href = os.path.join(self.extension.get('link'), packages[arch])
core.Link(parent, url=str(href), string=str(packages[arch]))
return parent
class PackageCodeReplace(command.CommandComponent):
"""
    Replaces __PACKAGE_NAME__ inside a code block with the corresponding
    version, as specified in the configuration file. You can specify the type
    of code block by providing the language=value setting.
The default language (if not provided) is bash: language=bash
YAML Syntax:
gcc: 7.3.0
Markdown Syntax:
!package! code
/path/to/gcc-__GCC__
"""
COMMAND = 'package'
SUBCOMMAND = 'code'
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
settings['max-height'] = ('350px', "The default height for listing content.")
settings['language'] = ('bash', "The language to use for highlighting, if not supplied " \
"it will be inferred from the extension (if possible).")
return settings
def createToken(self, parent, info, page):
content = info['inline'] if 'inline' in info else info['block']
content = re.sub(r'__(?P<package>[A-Z][A-Z_]+)__', self._subFunction, content,
flags=re.UNICODE)
core.Code(parent, style="max-height:{};".format(self.settings['max-height']),
language=self.settings['language'], content=content)
return parent
def _subFunction(self, match):
version = self.extension.get(match.group('package').lower(), None)
if version is not None:
return str(version)
return match.group(0)
class PackageTextReplace(command.CommandComponent):
"""
In-line package name replacement with a corresponding version, as
specified in the configuration file.
YAML Syntax:
gcc: 7.3.0
Markdown Syntax:
This is a sentence with gcc-[!package!gcc]
yields:
"This is a sentence with gcc-7.3.0"
"""
COMMAND = 'package'
SUBCOMMAND = '*'
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
return settings
def createToken(self, parent, info, page):
content = self.extension.get(info['subcommand'], dict())
tokens.String(parent, content=str(content))
return parent
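# Hedged sketch (added for illustration; not part of the extension): the
# __NAME__ substitution performed by PackageCodeReplace, reduced to a plain
# function over a hypothetical version map.
def _demo_package_sub(content, versions):
    """Replace __NAME__ tokens with versions['name'], leaving unknown names as-is."""
    return re.sub(r'__(?P<package>[A-Z][A-Z_]+)__',
                  lambda match: str(versions.get(match.group('package').lower(),
                                                 match.group(0))),
                  content, flags=re.UNICODE)
# e.g. _demo_package_sub('/path/to/gcc-__GCC__', {'gcc': '7.3.0'}) -> '/path/to/gcc-7.3.0'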
|
nuclear-wizard/moose
|
python/MooseDocs/extensions/package.py
|
Python
|
lgpl-2.1
| 5,047
|
[
"MOOSE"
] |
cbb70ec0a5098851c6f12843954575ea12abc1677c126faa7931d1024942ac28
|
# ; -*- mode: Python;-*-
from modelUtils.distanceDelays import makeDelayedLaterals
#JABALERT: Should update the docstring once the GCA paper has been
# accepted or at least submitted.
"""
GCAL
Work in progress on an improved version of the LISSOM orientation map
simulation from figure 5.9 of Miikkulainen, Bednar, Choe, and Sirosh
(2005), Computational Maps in the Visual Cortex, Springer. Important
differences include:
- Using divisive normalization to the LGN to provide contrast gain control (GC)
and contrast-invariant tuning
- Using homeostatic adaptation (A) rather than manual threshold adjustment,
to avoid the need for most parameter adjustment and to be more robust
- Using a fixed lateral excitatory radius rather than shrinking it
(now that homeostatic plasticity allows all neurons to develop robustly)
$Id$
"""
__version__='$Revision$'
from math import pi
import numpy, sys, os, pickle
import param
from topo import learningfn,numbergen,transferfn,pattern,projection,responsefn,sheet
import topo.learningfn.optimized
import topo.learningfn.projfn
import topo.transferfn.optimized
import topo.pattern.random
import topo.pattern.image
import topo.responsefn.optimized
import topo.sheet.lissom
import topo.sheet.optimized
import topo.transferfn.misc
from topo.base.arrayutil import DivideWithConstant
import topo.analysis.featureresponses
# Parameters that can be passed on the command line using -p
from topo.misc.commandline import global_params as p
def makeParams():
p.add(
dataset=param.ObjectSelector(default='Gaussian',objects=
['Gaussian','Nature'],doc="""
Set of input patterns to use::
:'Gaussian': Two-dimensional Gaussians
:'Nature': Shouval's 1999 monochrome 256x256 images"""),
num_inputs=param.Integer(default=2,bounds=(1,None),doc="""
How many input patterns to present per unit area at each
iteration, when using discrete patterns (e.g. Gaussians)."""),
area=param.Number(default=1.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
Linear size of cortical area to simulate.
2.0 gives a 2.0x2.0 Sheet area in V1."""),
retina_density=param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the retina."""),
lgn_density=param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for the LGN."""),
cortex_density=param.Number(default=48.0,bounds=(0,None),
inclusive_bounds=(False,True),doc="""
The nominal_density to use for V1."""),
scale=param.Number(default=0.7,inclusive_bounds=(False,True),doc="""
Brightness of the input patterns"""),
aff_strength=param.Number(default=1.5,bounds=(0.0,None),doc="""
Overall strength of the afferent projection to V1."""),
exc_strength=param.Number(default=1.7,bounds=(0.0,None),doc="""
Overall strength of the lateral excitatory projection to V1."""),
inh_strength=param.Number(default=1.4,bounds=(0.0,None),doc="""
Overall strength of the lateral inhibitory projection to V1."""),
aff_lr=param.Number(default=0.1,bounds=(0.0,None),doc="""
Learning rate for the afferent projection to V1."""),
exc_lr=param.Number(default=0.0,bounds=(0.0,None),doc="""
Learning rate for the lateral excitatory projection to V1."""),
inh_lr=param.Number(default=0.3,bounds=(0.0,None),doc="""
Learning rate for the lateral inhibitory projection to V1."""))
return p
def makeSheets(p):
### Specify weight initialization, response function, and learning function
projection.CFProjection.cf_shape=pattern.Disk(smoothing=0.0)
projection.CFProjection.response_fn=responsefn.optimized.CFPRF_DotProduct_opt()
projection.CFProjection.learning_fn=learningfn.optimized.CFPLF_Hebbian_opt()
projection.CFProjection.weights_output_fns=[transferfn.optimized.CFPOF_DivisiveNormalizeL1_opt()]
projection.SharedWeightCFProjection.response_fn=responsefn.optimized.CFPRF_DotProduct_opt()
combined_inputs = GCALStimulusPattern(p, "Gaussian")
topo.sim['Retina']=sheet.GeneratorSheet(nominal_density=p.retina_density,
input_generator=combined_inputs, period=1.0, phase=0.05,
nominal_bounds=sheet.BoundingBox(radius=p.area/2.0+0.25+0.375+0.5))
# LGN has lateral connections for divisive normalization
for s in ['LGNOn','LGNOff']:
topo.sim[s]=sheet.optimized.LISSOM_Opt(nominal_density=p.lgn_density,
nominal_bounds=sheet.BoundingBox(radius=p.area/2.0+0.25+0.5),
output_fns=[transferfn.misc.HalfRectify()],
tsettle=2,strict_tsettle=1,measure_maps=False)
topo.sim['V1'] = sheet.lissom.LISSOM(nominal_density=p.cortex_density,
tsettle=16, plastic=True,
nominal_bounds=sheet.BoundingBox(radius=p.area/2.0),
output_fns=[transferfn.misc.HomeostaticResponse()])
topo.sim['V1'].joint_norm_fn=topo.sheet.optimized.compute_joint_norm_totals_opt
return combined_inputs
def GCALStimulusPattern(p, patType="Gaussian"):
if patType != p.dataset: print "*WARNING*: Pattern class mismatch for GCAL"
if p.dataset=="Gaussian":
input_type=pattern.Gaussian
total_num_inputs=int(p.num_inputs*p.area*p.area)
inputs=[input_type(x=numbergen.UniformRandom(lbound=-(p.area/2.0+0.25),
ubound= (p.area/2.0+0.25),seed=12+i),
y=numbergen.UniformRandom(lbound=-(p.area/2.0+0.25),
ubound= (p.area/2.0+0.25),seed=35+i),
orientation=numbergen.UniformRandom(lbound=-pi,ubound=pi,seed=21+i),
# CEBALERT: is this used?
bounds=sheet.BoundingBox(radius=1.125),
size=0.088388, aspect_ratio=4.66667, scale=p.scale)
for i in xrange(total_num_inputs)]
combined_inputs = pattern.SeparatedComposite(min_separation=0,generators=inputs)
elif p.dataset=="Nature":
input_type=pattern.image.FileImage
image_filenames=["images/shouval/combined%02d.png"%(i+1) for i in xrange(25)]
inputs=[input_type(filename=f,
size=10.0, #size_normalization='original',(size=10.0)
x=numbergen.UniformRandom(lbound=-0.75,ubound=0.75,seed=12),
y=numbergen.UniformRandom(lbound=-0.75,ubound=0.75,seed=36),
orientation=numbergen.UniformRandom(lbound=-pi,ubound=pi,seed=65))
for f in image_filenames]
combined_inputs =pattern.Selector(generators=inputs)
return combined_inputs
### Connections
def connectLGNLaterals(LGNRingNo, PLOT):
boundsChangeList = []
# LGN has lateral connections for divisive normalization
for s in ['LGNOn','LGNOff']:
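        # Hedged note: DivideWithConstant(c=0.11) implements the divisive step of
        # the gain control, so LGN activity is (roughly) divided by (c + pooled
        # surround activity); this is the contrast normalization ("GC" in GCAL).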
lgn_surroundg = pattern.Gaussian(size=0.25,aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
connectionParams = {'delay':0.05, 'name':'LateralGC',
'dest_port':('Activity'), 'activity_group':(0.6,DivideWithConstant(c=0.11)),
'connection_type': projection.SharedWeightCFProjection,
'strength':0.6, 'weights_generator':lgn_surroundg,
'nominal_bounds_template':sheet.BoundingBox(radius=0.25)}
boundsChanged = makeDelayedLaterals(s, ('GC%s' % s), connectionParams, LGNRingNo,
pattern.Gaussian, {'size':0.25,'aspect_ratio':1.0,
'output_fns':[transferfn.DivisiveNormalizeL1()]} )
boundsChangeList.append(boundsChanged)
return boundsChangeList
def connectFeedForward(p):
# Components of DoG weights for the LGN
centerg = pattern.Gaussian(size=0.07385,aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
surroundg = pattern.Gaussian(size=0.29540,aspect_ratio=1.0,
output_fns=[transferfn.DivisiveNormalizeL1()])
# DoG weights for the LGN (center surround on and off)
on_weights = pattern.Composite(
generators=[centerg,surroundg],operator=numpy.subtract)
off_weights = pattern.Composite(
generators=[surroundg,centerg],operator=numpy.subtract)
topo.sim.connect(
'Retina','LGNOn',delay=0.05,strength=2.33,name='AfferentToLGNOn',
connection_type=projection.SharedWeightCFProjection,
nominal_bounds_template=sheet.BoundingBox(radius=0.375),
weights_generator=on_weights)
topo.sim.connect(
'Retina','LGNOff',delay=0.05,strength=2.33,name='AfferentToLGNOff',
connection_type=projection.SharedWeightCFProjection,
nominal_bounds_template=sheet.BoundingBox(radius=0.375),
weights_generator=off_weights)
topo.sim.connect(
'LGNOn','V1',delay=0.05,strength=p.aff_strength,name='LGNOnAfferent',
dest_port=('Activity','JointNormalize','Afferent'),
connection_type=projection.CFProjection,learning_rate=p.aff_lr,
nominal_bounds_template=sheet.BoundingBox(radius=0.27083),
weights_generator=pattern.random.GaussianCloud(gaussian_size=2*0.27083),
learning_fn=learningfn.optimized.CFPLF_Hebbian_opt())
topo.sim.connect(
'LGNOff','V1',delay=0.05,strength=p.aff_strength,name='LGNOffAfferent',
dest_port=('Activity','JointNormalize','Afferent'),
connection_type=projection.CFProjection,learning_rate=p.aff_lr,
nominal_bounds_template=sheet.BoundingBox(radius=0.27083),
weights_generator=pattern.random.GaussianCloud(gaussian_size=2*0.27083),
learning_fn=learningfn.optimized.CFPLF_Hebbian_opt())
def connectV1Laterals(p,V1RingNo):
######################################################################################
# <<<<<WARNING>>>>: CANNOT CHANGE BOUNDS AND SIZE IN CONNECTIONS POST-INITIALISATION #
######################################################################################
if V1RingNo not in [1, 'MAX']: print "**WARNING**: COUPLED RING COUNT FOR V1 LATERAL INH and EXC"
# topo.sim.connect(
# 'V1','V1',delay=0.05,strength=p.exc_strength,name='LateralExcitatory',
# connection_type=projection.CFProjection,learning_rate=p.exc_lr,
# nominal_bounds_template=sheet.BoundingBox(radius=0.104),
# weights_generator=pattern.Gaussian(aspect_ratio=1.0, size=0.05))
V1ExcParams = {'delay':0.05, 'strength':p.exc_strength, 'name':'LateralExcitatory',
'connection_type':projection.CFProjection, 'learning_rate':p.exc_lr,
'nominal_bounds_template':sheet.BoundingBox(radius=0.104),
'weights_generator':pattern.Gaussian(aspect_ratio=1.0, size=0.05)}
V1ExcBoundsChanged = makeDelayedLaterals('V1', 'LateralExcitatory', V1ExcParams, V1RingNo,
pattern.Gaussian, {'aspect_ratio':1.0, 'size':0.05})
# topo.sim.connect(
# 'V1','V1',delay=0.05,strength=-1.0*p.inh_strength,name='LateralInhibitory',
# connection_type=projection.CFProjection,learning_rate=p.inh_lr,
# nominal_bounds_template=sheet.BoundingBox(radius=0.22917),
# weights_generator=pattern.random.GaussianCloud(gaussian_size=0.15))
V1InhParams = {'delay':0.05, 'strength':-1.0*p.inh_strength, 'name':'LateralInhibitory',
'connection_type':projection.CFProjection,'learning_rate':p.inh_lr,
'nominal_bounds_template':sheet.BoundingBox(radius=0.22917),
'weights_generator':pattern.random.GaussianCloud(gaussian_size=0.15)}
V1InhBoundsChanged = makeDelayedLaterals('V1', 'LateralInhibitory' , V1InhParams, V1RingNo,
pattern.random.GaussianCloud, {'gaussian_size':0.15})
return [V1ExcBoundsChanged, V1InhBoundsChanged]
def config():
# Default locations for model editor
topo.sim.grid_layout([[None, 'V1', None],
['LGNOn', None, 'LGNOff'],
[None, 'Retina', None]], xstart=150,item_scale=0.8)
# Set up appropriate defaults for analysis
topo.analysis.featureresponses.FeatureMaps.selectivity_multiplier=2.0
topo.analysis.featureresponses.FeatureCurveCommand.apply_output_fns=True
topo.analysis.featureresponses.FeatureCurveCommand.curve_parameters=[{"contrast":1},
{"contrast":10},
{"contrast":30},
{"contrast":50},
{"contrast":100}]
def connectGCAL(p, LGNRingNo='MAX', V1RingNo='MAX',PLOT=True):
print "Connecting GCAL with %s LGN rings and %s V1 rings" % (str(LGNRingNo),str(V1RingNo))
LGNChangeList = connectLGNLaterals(LGNRingNo, PLOT)
connectFeedForward(p)
V1ChangeList = connectV1Laterals(p,V1RingNo)
config()
if True in (LGNChangeList+V1ChangeList): print "Bounds for a delay connection changed. Exiting.";sys.exit()
if __name__ == '__main__':
p =makeParams()
makeSheets(p)
connectGCAL(p, LGNRingNo=1, V1RingNo='MAX', PLOT=False)
config()
|
ioam/svn-history
|
contrib/JLStevens-TCAL/MastersVersion/models/gcal_vanilla.py
|
Python
|
bsd-3-clause
| 14,269
|
[
"Gaussian"
] |
53a87c27d98f5fae386c434221b4a17dc6c621039929b99bf52b34d7d3ac91b3
|
"""
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce.utils import audit_log, EcommerceService
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from edx_rest_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from lms.djangoapps.verify_student.ssencrypt import has_valid_signature
from lms.djangoapps.verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
IcrvStatusEmailsConfiguration,
)
from lms.djangoapps.verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from util.db import outer_atomic
from xmodule.modulestore.django import modulestore
from django.contrib.staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
    # Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
        # If the course mode is not verified (i.e. only paid), then already_verified is always True.
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
sku_to_use = relevant_course_mode.sku
purchase_workflow = request.GET.get('purchase_workflow', 'single')
if purchase_workflow == 'bulk' and relevant_course_mode.bulk_sku:
sku_to_use = relevant_course_mode.bulk_sku
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key,
user_is_trying_to_pay,
request.user,
sku_to_use
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
requirements = self._requirements(display_steps, request.user.is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
# This parameter should only work for known skip-able steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(request.user.is_active),
'user_email': request.user.email,
'message_key': message,
'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': (
get_default_time_display(verification_deadline)
if verification_deadline else ""
),
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
'is_ab_testing': 'begin-flow' in request.path,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def _redirect_if_necessary(
self, message, already_verified, already_paid, is_enrolled, course_key, # pylint: disable=bad-continuation
user_is_trying_to_pay, user, sku # pylint: disable=bad-continuation
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
if user_is_trying_to_pay and user.is_active and not already_paid:
            # If the user is trying to pay, has activated their account, and the
            # ecommerce service is enabled, redirect them to the ecommerce checkout page.
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(user):
url = ecommerce_service.checkout_page_url(sku)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, find the first non credit expired paid mode
for mode in all_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, return None and so the view knows to respond with a 404.
return None
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
user:
date_format: optional parameter for formatting datetime
object to string in response
Returns:
datetime object in string format
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': (
get_default_time_display(deadline_datetime)
if deadline_datetime else ""
)
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
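    # quantize(Decimal('.01'), rounding=ROUND_DOWN) truncates to whole cents,
    # e.g. Decimal('10.999') becomes Decimal('10.99').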
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
sku = request.POST.get('sku', None)
if sku:
try:
current_mode = CourseMode.objects.get(sku=sku)
except CourseMode.DoesNotExist:
log.exception(u'Failed to find CourseMode with SKU [%s].', sku)
if not current_mode:
        # If more than one paid mode (min_price > 0, e.g. verified/professional/
        # no-id-professional) exists for the course, choose the first one.
paid_modes = CourseMode.paid_modes_for_course(course_id)
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(transaction.non_atomic_requests)
def dispatch(self, *args, **kwargs): # pylint: disable=missing-docstring
return super(SubmitPhotosView, self).dispatch(*args, **kwargs)
@method_decorator(login_required)
@method_decorator(outer_atomic(read_committed=True))
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
        # If we have a photo ID image, we do not want to use the initial verification image.
if photo_id_image is not None:
initial_verification = None
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
"""
Associate the verification attempt with a checkpoint within a course.
Arguments:
user (User): The user making the attempt.
attempt (SoftwareSecurePhotoVerification): The verification attempt.
course_key (CourseKey): The identifier for the course.
usage_id (UsageKey): The location of the checkpoint within the course.
Returns:
VerificationCheckpoint
"""
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
checkpoint.add_verification_attempt(attempt)
VerificationStatus.add_verification_status(checkpoint, user, "submitted")
return checkpoint
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
context = {
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
def _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
): # pylint: disable=invalid-name
"""
Compose subject and message for photo reverification email.
Args:
course_key(CourseKey): CourseKey object
user_id(str): User Id
related_assessment_location(str): Location of reverification XBlock
status(str): Approval status
request(HttpRequest): The request object, used to build absolute URLs
Returns:
None if any error occurred else Tuple of subject and message strings
"""
try:
usage_key = UsageKey.from_string(related_assessment_location)
reverification_block = modulestore().get_item(usage_key)
course = modulestore().get_course(course_key)
redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))
subject = "Re-verification Status"
context = {
"status": status,
"course_name": course.display_name_with_default_escaped,
"assessment": reverification_block.related_assessment
}
# Allowed attempts is 1 if not set on verification block
allowed_attempts = reverification_block.attempts + 1
used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
left_attempts = allowed_attempts - used_attempts
is_attempt_allowed = left_attempts > 0
verification_open = True
if reverification_block.due:
verification_open = timezone.now() <= reverification_block.due
context["left_attempts"] = left_attempts
context["is_attempt_allowed"] = is_attempt_allowed
context["verification_open"] = verification_open
context["due_date"] = get_default_time_display(reverification_block.due)
context['platform_name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
context["used_attempts"] = used_attempts
context["allowed_attempts"] = allowed_attempts
context["support_link"] = configuration_helpers.get_value('email_from_address', settings.CONTACT_EMAIL)
re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(course_key),
related_assessment_location
)
)
context["course_link"] = request.build_absolute_uri(redirect_url)
context["reverify_link"] = request.build_absolute_uri(re_verification_link)
message = render_to_string('emails/reverification_processed.txt', context)
log.info(
"Sending email to User_Id=%s. Attempts left for this user are %s. "
"Allowed attempts %s. "
"Due Date %s",
str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
)
return subject, message
# Catch all exceptions to avoid raising back to the view
except: # pylint: disable=bare-except
log.exception("The email for re-verification sending failed for user_id %s", user_id)
def _send_email(user_id, subject, message):
""" Send email to given user
Args:
user_id(str): User Id
subject(str): Subject lines of emails
message(str): Email message body
Returns:
None
"""
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
user = User.objects.get(id=user_id)
user.email_user(subject, message, from_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
"""Sets the status of a credit requirement for the user,
based on a verification checkpoint.
"""
checkpoint = None
try:
checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
except VerificationCheckpoint.DoesNotExist:
log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
if checkpoint is not None:
try:
set_credit_requirement_status(
attempt.user,
checkpoint.course_id,
namespace,
checkpoint.checkpoint_location,
status=status,
reason=reason,
)
except Exception: # pylint: disable=broad-except
# Catch exception if unable to add credit requirement
# status for user
log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
_set_user_requirement_status(attempt, 'reverification', 'satisfied')
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
_set_user_requirement_status(
attempt, 'reverification', 'failed', json.dumps(reason)
)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)
# Trigger ICRV email only if ICRV status emails config is enabled
icrv_status_emails = IcrvStatusEmailsConfiguration.current()
if icrv_status_emails.enabled and checkpoints:
user_id = attempt.user.id
course_key = checkpoints[0].course_id
related_assessment_location = checkpoints[0].checkpoint_location
subject, message = _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
)
_send_email(user_id, subject, message)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, _ = SoftwareSecurePhotoVerification.user_status(request.user)
# If the user has no initial verification, or if the verification
# process is still ongoing ('pending') or has expired, then allow
# the user to submit a new photo verification.
# A photo verification is marked as 'pending' if its status is either
# 'submitted' or 'must_retry'.
if status in ["none", "must_reverify", "expired", "pending"]:
context = {
"user_full_name": request.user.profile.name,
"platform_name": configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
class InCourseReverifyView(View):
"""
The in-course reverification view.
In-course reverification occurs while a student is taking a course.
At points in the course, students are prompted to submit face photos,
which are matched against the ID photos the user submitted during their
initial verification.
Students are prompted to enter this flow from an "In Course Reverification"
XBlock (courseware component) that course authors add to the course.
See https://github.com/edx/edx-reverification-block for more details.
"""
@method_decorator(login_required)
def get(self, request, course_id, usage_id):
"""Display the view for face photo submission.
Args:
request(HttpRequest): HttpRequest object
course_id(str): A string of course id
usage_id(str): Location of Reverification XBlock in courseware
Returns:
HttpResponse
"""
user = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if course is None:
log.error(u"Could not find course '%s' for in-course reverification.", course_key)
raise Http404
try:
checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
except VerificationCheckpoint.DoesNotExist:
log.error(
u"No verification checkpoint exists for the "
u"course '%s' and checkpoint location '%s'.",
course_key, usage_id
)
raise Http404
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
if not initial_verification:
return self._redirect_to_initial_verification(user, course_key, usage_id)
# emit the reverification event
self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)
context = {
'course_key': unicode(course_key),
'course_name': course.display_name_with_default_escaped,
'checkpoint_name': checkpoint.checkpoint_name,
'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
'usage_id': usage_id,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/incourse_reverify.html", context)
def _track_reverification_events(self, event_name, user_id, course_id, checkpoint):
"""Track re-verification events for a user against a reverification
checkpoint of a course.
Arguments:
event_name (str): Name of event being tracked
user_id (str): The ID of the user
course_id (unicode): ID associated with the course
checkpoint (str): Checkpoint name
Returns:
None
"""
log.info(
u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
event_name, user_id, course_id, checkpoint
)
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': "verification",
'label': unicode(course_id),
'checkpoint': checkpoint
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _redirect_to_initial_verification(self, user, course_key, checkpoint):
"""
Redirect because the user does not have an initial verification.
We will redirect the user to the initial verification flow,
passing the identifier for this checkpoint. When the user
submits a verification attempt, it will count for *both*
the initial and checkpoint verification.
Arguments:
user (User): The user who made the request.
course_key (CourseKey): The identifier for the course for which
the user is attempting to re-verify.
checkpoint (string): Location of the checkpoint in the courseware.
Returns:
HttpResponse
"""
log.info(
u"User %s does not have an initial verification, so "
u"he/she will be redirected to the \"verify later\" flow "
u"for the course %s.",
user.id, course_key
)
base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
params = urllib.urlencode({"checkpoint": checkpoint})
full_url = u"{base}?{params}".format(base=base_url, params=params)
return redirect(full_url)
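# Illustrative only: for a course key like "course-v1:edX+DemoX+Demo", the
# URL built above looks roughly like
#   /verify_student/verify_now/course-v1:edX+DemoX+Demo?checkpoint=<usage_id>
# with the checkpoint value percent-encoded by urlencode (the exact path
# depends on the URLconf entry for 'verify_student_verify_now').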
|
tanmaykm/edx-platform
|
lms/djangoapps/verify_student/views.py
|
Python
|
agpl-3.0
| 60,691
|
[
"VisIt"
] |
a23eed84fded6081454c7619c945b32918e999f0812819bea12a453dfbdad80b
|
import arch.bootstrap
import mdtraj as md
import numpy as np
import mdtraj.utils.unit.unit_definitions as u
kB = 1.3806488E-23 * u.joule / u.kelvin
epsilon0 = 8.854187817E-12 * u.farad / u.meter
gas_constant = 8.3144621 * u.joule / u.kelvin / u.mole
def dipole_moment_errorbars(traj, charges, temperature):
    """Modified from mdtraj.geometry.static_dielectric()."""
    moments = md.geometry.dipole_moments(traj, charges)
    mu = moments.mean(0)  # Mean over frames
    subtracted = moments - mu
    dipole_variance = (subtracted * subtracted).sum(-1).mean(0) * (u.elementary_charge * u.nanometers) ** 2.  # <M*M> - <M>*<M> = <(M - <M>) * (M - <M>)>
    volume = traj.unitcell_volumes.mean() * u.nanometers ** 3.  # Average box volume of trajectory
    static_dielectric_sigma = dipole_variance / (3 * kB * temperature * volume * epsilon0)  # Eq. 7 of Derivation of an improved simple point charge model for liquid water: SPC/A and SPC/L
    return static_dielectric_sigma
def bootstrap_old(traj, charges, temperature, block_length):
n = traj.n_frames // block_length  # integer number of blocks
indices = np.array_split(np.arange(traj.n_frames), n)
epsilon = np.zeros(n)
for k, ind in enumerate(indices):
t = traj[ind]
epsilon[k] = md.geometry.static_dielectric(t, charges, temperature)
return epsilon, epsilon.std() * n ** -0.5
def find_block_size(traj, charges, temperature, num_block_sizes_to_try=12, num_bootstrap=15):
block_size_grid = np.logspace(0, np.log10(len(traj)), num_block_sizes_to_try).astype('int')
block_size_grid = np.unique(block_size_grid)  # The float -> int conversion can produce duplicate values, so drop them
epsilon_grid = np.array([bootstrap(traj, charges, temperature, block_length, num_bootstrap) for block_length in block_size_grid])
return block_size_grid[epsilon_grid.argmax()]
def bootstrap(traj, charges, temperature, block_length, num_bootstrap):
bootstrap = arch.bootstrap.CircularBlockBootstrap(block_length, traj=traj)
def bootstrap_func(traj):
return md.geometry.static_dielectric(traj, charges, temperature)
results = bootstrap.apply(bootstrap_func, num_bootstrap)
epsilon_err = results.std()
return epsilon_err
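# Hedged usage sketch (file names and charges below are placeholders, not
# part of this module):
#
# traj = md.load("water.dcd", top="water.pdb")
# charges = np.zeros(traj.n_atoms)  # substitute real per-atom partial charges
# temperature = 300.0 * u.kelvin
# block_length = find_block_size(traj, charges, temperature)
# epsilon_err = bootstrap(traj, charges, temperature, block_length, num_bootstrap=25)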
|
choderalab/LiquidBenchmark
|
src/dipole_errorbars.py
|
Python
|
gpl-2.0
| 2,191
|
[
"MDTraj"
] |
c718697e8fee727e1c30e860a44fb2e4591be00feaffb0163e26865591641bd3
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size, since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.request_provider_notification_subscription import RequestProviderNotificationSubscription
class TestRequestProviderNotificationSubscription(unittest.TestCase):
""" RequestProviderNotificationSubscription unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testRequestProviderNotificationSubscription(self):
"""
Test RequestProviderNotificationSubscription
"""
model = vericred_client.models.request_provider_notification_subscription.RequestProviderNotificationSubscription()
if __name__ == '__main__':
unittest.main()
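# Not part of the generated stubs: a hedged sketch of calling the API
# described in the module docstring with the `requests` library (YOUR_KEY
# is a placeholder; the endpoint and parameters come from the docstring
# examples above):
#
# import requests
# resp = requests.get(
#     "https://api.vericred.com/providers",
#     headers={"Vericred-Api-Key": "YOUR_KEY", "Accept-Version": "v3"},
#     params={"search_term": "Foo", "zip_code": "11215",
#             "select": "provider.name,provider.phone"},
# )
# providers = resp.json()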
|
vericred/vericred-python
|
test/test_request_provider_notification_subscription.py
|
Python
|
apache-2.0
| 10,225
|
[
"VisIt"
] |
d6306e0d94eb0118a9d7c2432b3fe3051c855bbc26f5b4090528c20a086bab42
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from invenio_accounts.testutils import login_user_via_session
from invenio_records.models import RecordMetadata
from inspirehep.modules.migrator.models import InspireProdRecords
def test_all_records_were_loaded(app):
records = [record.json for record in RecordMetadata.query.all()]
expected = 43
result = len(records)
assert expected == result
def test_all_records_are_valid(app):
invalid = InspireProdRecords.query.filter(InspireProdRecords.valid.is_(False)).values(InspireProdRecords.recid)
recids = [el[0] for el in invalid]
assert recids == []
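# Note on the filter above: `.is_(False)` is compiled by SQLAlchemy into a
# SQL `valid IS false` clause; a plain Python `is False` comparison on the
# column object would be evaluated eagerly (always False) and the filter
# would silently match nothing.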
def test_all_records_are_there(app_client):
# Use superadmin user to ensure we can visit all records
login_user_via_session(app_client, email='admin@inspirehep.net')
failed = []
for record in [record.json for record in RecordMetadata.query.all()]:
try:
absolute_url = record['self']['$ref']
relative_url = absolute_url.partition('api')[2]
response = app_client.get(relative_url)
assert response.status_code == 200
except Exception:
failed.append(record['control_number'])
assert failed == []
|
jacquerie/inspire-next
|
tests/integration/test_detailed_records.py
|
Python
|
gpl-3.0
| 2,148
|
[
"VisIt"
] |
500e883e43f1df75ba473d9aaf600f63a07dcd0ee491a1b44814e9bc7e829a0f
|
""" Meager code path measurement tool.
Ned Batchelder
http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html
MIT License.
"""
try:
from compiler import parse # NOQA
iter_child_nodes = None # NOQA
except ImportError:
from ast import parse, iter_child_nodes # NOQA
import optparse
import sys
from collections import defaultdict
WARNING_CODE = "W901"
class ASTVisitor:
VERBOSE = 0
def __init__(self):
self.node = None
self._cache = {}
def default(self, node, *args):
if hasattr(node, 'getChildNodes'):
children = node.getChildNodes()
else:
children = iter_child_nodes(node)
for child in children:
self.dispatch(child, *args)
def dispatch(self, node, *args):
self.node = node
klass = node.__class__
meth = self._cache.get(klass)
if meth is None:
className = klass.__name__
meth = getattr(self.visitor, 'visit' + className, self.default)
self._cache[klass] = meth
return meth(node, *args)
def preorder(self, tree, visitor, *args):
"""Do preorder walk of tree using visitor"""
self.visitor = visitor
visitor.visit = self.dispatch
self.dispatch(tree, *args) # XXX *args make sense?
class PathNode:
def __init__(self, name, look="circle"):
self.name = name
self.look = look
def to_dot(self):
print('node [shape=%s,label="%s"] %d;' % \
(self.look, self.name, self.dot_id()))
def dot_id(self):
return id(self)
class PathGraph:
def __init__(self, name, entity, lineno):
self.name = name
self.entity = entity
self.lineno = lineno
self.nodes = defaultdict(list)
def connect(self, n1, n2):
self.nodes[n1].append(n2)
def to_dot(self):
print('subgraph {')
for node in self.nodes:
node.to_dot()
for node, nexts in self.nodes.items():
for next in nexts:
print('%s -- %s;' % (node.dot_id(), next.dot_id()))
print('}')
def complexity(self):
""" Return the McCabe complexity for the graph.
V-E+2
"""
num_edges = sum([len(n) for n in self.nodes.values()])
num_nodes = len(self.nodes)
return num_edges - num_nodes + 2
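# Worked example of the V-E+2 count above (comments only): a straight-line
# function yields a chain of N statement nodes with N-1 edges, so its
# complexity is (N-1) - N + 2 = 1; an if/else adds one extra path through
# the graph (e.g. 5 nodes, 5 edges -> 5 - 5 + 2 = 2), matching McCabe's
# usual definition.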
class PathGraphingAstVisitor(ASTVisitor):
""" A visitor for a parsed Abstract Syntax Tree which finds executable
statements.
"""
def __init__(self):
ASTVisitor.__init__(self)
self.classname = ""
self.graphs = {}
self.reset()
def reset(self):
self.graph = None
self.tail = None
def visitFunction(self, node):
if self.classname:
entity = '%s%s' % (self.classname, node.name)
else:
entity = node.name
name = '%d:1: %r' % (node.lineno, entity)
if self.graph is not None:
# closure
pathnode = self.appendPathNode(name)
self.tail = pathnode
self.default(node)
bottom = PathNode("", look='point')
self.graph.connect(self.tail, bottom)
self.graph.connect(pathnode, bottom)
self.tail = bottom
else:
self.graph = PathGraph(name, entity, node.lineno)
pathnode = PathNode(name)
self.tail = pathnode
self.default(node)
self.graphs["%s%s" % (self.classname, node.name)] = self.graph
self.reset()
visitFunctionDef = visitFunction
def visitClass(self, node):
old_classname = self.classname
self.classname += node.name + "."
self.default(node)
self.classname = old_classname
def appendPathNode(self, name):
if not self.tail:
return
pathnode = PathNode(name)
self.graph.connect(self.tail, pathnode)
self.tail = pathnode
return pathnode
def visitSimpleStatement(self, node):
if node.lineno is None:
lineno = 0
else:
lineno = node.lineno
name = "Stmt %d" % lineno
self.appendPathNode(name)
visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
visitPass = visitDiscard = visitGlobal = visitReturn = \
visitSimpleStatement
def visitLoop(self, node):
name = "Loop %d" % node.lineno
if self.graph is None:
# global loop
self.graph = PathGraph(name, name, node.lineno)
pathnode = PathNode(name)
self.tail = pathnode
self.default(node)
self.graphs["%s%s" % (self.classname, name)] = self.graph
self.reset()
else:
pathnode = self.appendPathNode(name)
self.tail = pathnode
self.default(node.body)
bottom = PathNode("", look='point')
self.graph.connect(self.tail, bottom)
self.graph.connect(pathnode, bottom)
self.tail = bottom
# TODO: else clause in node.else_
visitFor = visitWhile = visitLoop
def visitIf(self, node):
name = "If %d" % node.lineno
pathnode = self.appendPathNode(name)
if not pathnode:
return # TODO: figure out what to do with if's outside def's.
loose_ends = []
for t, n in node.tests:
self.tail = pathnode
self.default(n)
loose_ends.append(self.tail)
if node.else_:
self.tail = pathnode
self.default(node.else_)
loose_ends.append(self.tail)
else:
loose_ends.append(pathnode)
bottom = PathNode("", look='point')
for le in loose_ends:
self.graph.connect(le, bottom)
self.tail = bottom
# TODO: visitTryExcept
# TODO: visitTryFinally
# TODO: visitWith
# XXX todo: determine which ones can add to the complexity
# py2
# TODO: visitStmt
# TODO: visitAssName
# TODO: visitCallFunc
# TODO: visitConst
# py3
# TODO: visitStore
# TODO: visitCall
# TODO: visitLoad
# TODO: visitNum
# TODO: visitarguments
# TODO: visitExpr
def get_code_complexity(code, min=7, filename='stdin'):
complex = []
try:
ast = parse(code)
except AttributeError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to parse %s: %s\n" % (filename, e))
return 0
visitor = PathGraphingAstVisitor()
visitor.preorder(ast, visitor)
for graph in visitor.graphs.values():
if graph is None:
# ?
continue
if graph.complexity() >= min:
complex.append(dict(
    type='W',
    lnum=graph.lineno,
    text='%s %r is too complex (%d)' % (
        WARNING_CODE,
        graph.entity,
        graph.complexity(),
    ),
))
return complex
def get_module_complexity(module_path, min=7):
"""Returns the complexity of a module"""
code = open(module_path, "rU").read() + '\n\n'
return get_code_complexity(code, min, filename=module_path)
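# Minimal usage sketch (the source string is an arbitrary example):
#
# warnings = get_code_complexity(
#     "def f(x):\n    if x:\n        return 1\n    return 2\n", min=1)
# for w in warnings:
#     print(w['lnum'], w['text'])  # e.g. 1 "W901 'f' is too complex (2)"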
def main(argv):
opar = optparse.OptionParser()
opar.add_option("-d", "--dot", dest="dot",
help="output a graphviz dot file", action="store_true")
opar.add_option("-m", "--min", dest="min",
help="minimum complexity for output", type="int",
default=2)
options, args = opar.parse_args(argv)
text = open(args[0], "rU").read() + '\n\n'
ast = parse(text)
visitor = PathGraphingAstVisitor()
visitor.preorder(ast, visitor)
if options.dot:
print('graph {')
for graph in visitor.graphs.values():
if graph.complexity() >= options.min:
graph.to_dot()
print('}')
else:
for graph in visitor.graphs.values():
if graph.complexity() >= options.min:
print(graph.name, graph.complexity())
if __name__ == '__main__':
main(sys.argv[1:])
|
isohybrid/dotfile
|
vim/bundle/git:--github.com-klen-python-mode/pylibs/mccabe.py
|
Python
|
bsd-2-clause
| 8,283
|
[
"VisIt"
] |
e903786e4e8ded5a7e9281017155fbb54e2f5b097f8419a9a2072add36664092
|
from __future__ import print_function, absolute_import, division
import glob
import os
import numpy as np
from .utils import expand_path, num_samples
class BaseDatasetLoader(object):
short_name = None
def load(self):
raise NotImplementedError('should be implemented in subclass')
class MSMBuilderDatasetLoader(BaseDatasetLoader):
short_name = 'msmbuilder'
def __init__(self, path, fmt=None, verbose=False):
self.path = path
self.fmt = fmt
self.verbose = verbose
def load(self):
from msmbuilder.dataset import dataset
ds = dataset(self.path, mode='r', fmt=self.fmt, verbose=self.verbose)
print('Dataset provenance:\n')
print(ds.provenance)
return ds, None
class NumpyDatasetLoader(BaseDatasetLoader):
short_name = 'numpy'
def __init__(self, filenames):
self.filenames = filenames
def load(self):
filenames = sorted(glob.glob(expand_path(self.filenames)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.filenames)
ds = [np.load(f) for f in filenames]
return ds, None
class HDF5DatasetLoader(BaseDatasetLoader):
short_name = 'hdf5'
def __init__(self, filenames, y_col=None, stride=1, concat=False):
self.filenames = filenames
self.y_col = y_col
self.stride = stride
self.concat = concat
def transform(self, X):
n_rows = X.shape[0]
X = np.atleast_2d(X)
if X.shape[0] != n_rows:
X = X.T
if self.y_col is not None:
cols = range(X.shape[1])
x_idx = [i for i, val in enumerate(cols) if val != self.y_col]
y_idx = [i for i, val in enumerate(cols) if val == self.y_col]
return X[::self.stride, x_idx], X[::self.stride, y_idx].ravel()
return X[::self.stride, :], None
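# Worked example for transform() (shapes are illustrative): given a
# (100, 4) array with y_col=3 and stride=2, the result is an X of shape
# (50, 3) taken from columns 0-2 of every other row, plus a flat y of
# shape (50,) from column 3.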
def loader(self, fn):
from mdtraj import io
dataset = io.loadh(fn)
for key in dataset.iterkeys():
yield dataset[key]
def load(self):
X = []
y = []
filenames = sorted(glob.glob(expand_path(self.filenames)))
for fn in filenames:
for data in self.loader(fn):
data = self.transform(data)
X.append(data[0])
y.append(data[1])
if self.concat:
X = np.concatenate(X, axis=0)
y = np.concatenate(y, axis=0)
if num_samples(X) == 1:
X = X[0]
y = y[0]
if self.y_col is not None:
return X, y
return X, None
class DSVDatasetLoader(BaseDatasetLoader):
short_name = 'dsv'
def __init__(self, filenames, y_col=None, delimiter=',', skip_header=0,
skip_footer=0, filling_values=np.nan, usecols=None, stride=1,
concat=False):
self.filenames = filenames
self.y_col = y_col
self.delimiter = delimiter
self.skip_header = skip_header
self.skip_footer = skip_footer
self.filling_values = filling_values
if usecols and isinstance(usecols, str):
usecols = list(map(int, usecols.strip().split(',')))
elif usecols and isinstance(usecols, (tuple, set)):
usecols = sorted(list(usecols))
if usecols and y_col:
if y_col not in usecols:
usecols.append(y_col)
self.usecols = usecols
self.stride = stride
self.concat = concat
def transform(self, X):
n_rows = X.shape[0]
X = np.atleast_2d(X)
if X.shape[0] != n_rows:
X = X.T
if self.y_col is not None:
cols = list(range(X.shape[1]))
if self.usecols:
cols = self.usecols
x_idx = [i for i, val in enumerate(cols) if val != self.y_col]
y_idx = [i for i, val in enumerate(cols) if val == self.y_col]
return X[::self.stride, x_idx], X[::self.stride, y_idx].ravel()
return X[::self.stride, :], None
def loader(self, fn):
return np.genfromtxt(fn, delimiter=self.delimiter,
skip_header=self.skip_header,
skip_footer=self.skip_footer,
filling_values=self.filling_values,
usecols=self.usecols)
def load(self):
X = []
y = []
filenames = sorted(glob.glob(expand_path(self.filenames)))
for fn in filenames:
data = self.transform(self.loader(fn))
X.append(data[0])
y.append(data[1])
if self.concat:
X = np.concatenate(X, axis=0)
y = np.concatenate(y, axis=0)
if num_samples(X) == 1:
X = X[0]
y = y[0]
if self.y_col is not None:
return X, y
return X, None
class MDTrajDatasetLoader(BaseDatasetLoader):
short_name = 'mdtraj'
def __init__(self, trajectories, topology=None, stride=1, verbose=False):
self.trajectories = trajectories
self.topology = topology
self.stride = stride
self.verbose = verbose
def load(self):
import mdtraj
filenames = sorted(glob.glob(expand_path(self.trajectories)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.trajectories)
top = self.topology
kwargs = {}
if top is not None:
top = expand_path(self.topology)
kwargs = {'top': top}
X = []
y = None
for fn in filenames:
if self.verbose:
print('[mdtraj] loading %s' % fn)
X.append(mdtraj.load(fn, stride=self.stride, **kwargs))
return X, y
class FilenameDatasetLoader(BaseDatasetLoader):
"""Just pass a bunch of filenames to the first step of the pipeline
The pipeline will do the loading.
"""
short_name = 'filename'
def __init__(self, trajectories, abs_path=True):
self.traj_glob = trajectories
self.abs_path = abs_path
def load(self):
filenames = sorted(glob.glob(expand_path(self.traj_glob)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.traj_glob)
if self.abs_path:
filenames = [os.path.abspath(fn) for fn in filenames]
return filenames, None
class JoblibDatasetLoader(BaseDatasetLoader):
short_name = 'joblib'
def __init__(self, filenames, x_name=None, y_name=None,
system_joblib=False):
self.filenames = filenames
self.x_name = x_name
self.y_name = y_name
self.system_joblib = system_joblib
def load(self):
if self.system_joblib:
import joblib
else:
from sklearn.externals import joblib
X, y = [], []
filenames = sorted(glob.glob(expand_path(self.filenames)))
if len(filenames) == 0:
raise RuntimeError('no filenames matched by pattern: %s' %
self.filenames)
for fn in filenames:
obj = joblib.load(fn)
if isinstance(obj, (list, np.ndarray)):
X.append(obj)
else:
X.append(obj[self.x_name])
y.append(obj[self.y_name])
if num_samples(X) == 1:
X = X[0]
if len(y) == 1:
y = y[0]
elif len(y) == 0:
y = None
return X, y
class SklearnDatasetLoader(BaseDatasetLoader):
short_name = 'sklearn_dataset'
def __init__(self, method, x_name='data', y_name='target', **kwargs):
self.method = method
self.x_name = x_name
self.y_name = y_name
self.kwargs = kwargs
def load(self):
import sklearn.datasets
try:
loader = getattr(sklearn.datasets, self.method)
except AttributeError:
raise RuntimeError('no %s in sklearn.datasets' % self.method)
bunch = loader(**self.kwargs)
X = bunch[self.x_name]
y = bunch[self.y_name]
return X, y
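# Hedged usage sketch (assumes scikit-learn is installed; 'load_iris' is a
# real sklearn.datasets loader, used here purely as an illustration):
#
# loader = SklearnDatasetLoader('load_iris')
# X, y = loader.load()
# print(X.shape, y.shape)  # (150, 4) (150,)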
|
msultan/osprey
|
osprey/dataset_loaders.py
|
Python
|
apache-2.0
| 8,315
|
[
"MDTraj"
] |
e3b5babd3236f06c4126f54a377692e50577ac5e3af3a5a3848d25e502ff0e88
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest2
import os
import json
import numpy as np
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
# from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
# from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimpleAbundanceChemenvStrategy
# from pymatgen.core.structure import Structure
json_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "json_test_files")
class CoordinationGeometryFinderTest(unittest2.TestCase):
def setUp(self):
self.lgf = LocalGeometryFinder()
self.lgf.setup_parameters(centering_type='standard',
structure_refinement=self.lgf.STRUCTURE_REFINEMENT_NONE)
# self.strategies = [SimplestChemenvStrategy(), SimpleAbundanceChemenvStrategy()]
#
# def _strategy_test(self, strategy):
# files = []
# for (dirpath, dirnames, filenames) in os.walk(json_files_dir):
# files.extend(filenames)
# break
#
# for ifile, json_file in enumerate(files):
# with self.subTest(json_file=json_file):
# f = open("{}/{}".format(json_files_dir, json_file), 'r')
# dd = json.load(f)
# f.close()
#
# atom_indices = dd['atom_indices']
# expected_geoms = dd['expected_geoms']
#
# struct = Structure.from_dict(dd['structure'])
#
# struct = self.lgf.setup_structure(struct)
# se = self.lgf.compute_structure_environments_detailed_voronoi(only_indices=atom_indices,
# maximum_distance_factor=1.5)
#
# #All strategies should get the correct environment with their default parameters
# strategy.set_structure_environments(se)
# for ienv, isite in enumerate(atom_indices):
# ce = strategy.get_site_coordination_environment(struct[isite])
# try:
# coord_env = ce[0]
# except TypeError:
# coord_env = ce
# #Check that the environment found is the expected one
# self.assertEqual(coord_env, expected_geoms[ienv])
#
# def test_simplest_chemenv_strategy(self):
# strategy = SimplestChemenvStrategy()
# self._strategy_test(strategy)
#
# def test_simple_abundance_chemenv_strategy(self):
# strategy = SimpleAbundanceChemenvStrategy()
# self._strategy_test(strategy)
def test_perfect_environments(self):
allcg = AllCoordinationGeometries()
indices_CN = {1: [0],
2: [1, 0],
3: [1, 0, 2],
4: [2, 0, 3, 1],
5: [2, 3, 1, 0, 4],
6: [0, 2, 3, 1, 5, 4],
7: [2, 6, 0, 3, 4, 5, 1],
8: [1, 2, 6, 3, 7, 0, 4, 5],
9: [5, 2, 6, 0, 4, 7, 3, 8, 1],
10: [8, 5, 6, 3, 0, 7, 2, 4, 9, 1],
11: [7, 6, 4, 1, 2, 5, 0, 8, 9, 10, 3],
12: [5, 8, 9, 0, 3, 1, 4, 2, 6, 11, 10, 7],
13: [4, 11, 5, 12, 1, 2, 8, 3, 0, 6, 9, 7, 10],
}
for coordination in range(1, 14):
for mp_symbol in allcg.get_implemented_geometries(coordination=coordination,
returned='mp_symbol'):
with self.subTest(msg=mp_symbol, mp_symbol=mp_symbol):
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
self.lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
self.lgf.setup_test_perfect_environment(mp_symbol, randomness=False,
indices=indices_CN[coordination],
random_translation='NONE', random_rotation='NONE',
random_scale='NONE')
se = self.lgf.compute_structure_environments(only_indices=[0],
maximum_distance_factor=1.01*cg.distfactor_max,
min_cn=cg.coordination_number,
max_cn=cg.coordination_number,
only_symbols=[mp_symbol]
)
self.assertAlmostEqual(se.get_csm(0, mp_symbol)['symmetry_measure'], 0.0, delta=1e-8,
msg='Failed to get perfect environment with mp_symbol {}'.format(mp_symbol))
if __name__ == "__main__":
unittest2.main()
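# Sketch of the pattern the test above exercises, outside the test harness
# ('T:4' is the tetrahedron symbol, one example of an implemented geometry;
# the arguments mirror the calls in the test, and other defaults are
# assumed):
#
# lgf = LocalGeometryFinder()
# lgf.setup_parameters(centering_type='standard',
#                      structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
# lgf.setup_test_perfect_environment('T:4', randomness=False,
#                                    indices=[2, 0, 3, 1],
#                                    random_translation='NONE',
#                                    random_rotation='NONE',
#                                    random_scale='NONE')
# se = lgf.compute_structure_environments(only_indices=[0],
#                                         only_symbols=['T:4'])
# print(se.get_csm(0, 'T:4')['symmetry_measure'])  # ~0 for a perfect site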
|
aykol/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/tests/test_coordination_geometry_finder.py
|
Python
|
mit
| 5,358
|
[
"pymatgen"
] |
ff33227fd7ee23d39b7953f7d9b00c2363acda9eee5548ab4a31552bb409a8f8
|
########################################################################
#
# File Name: __init__.py
#
#
from types import UnicodeType
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
"""Some Helper functions: 4DOM/PyXML-specific Extensions to the DOM,
and DOM-related utilities."""
__all__ = ["Print", "PrettyPrint"]
import string
import sys
import re
from xml.dom import Node
from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE
HTML_4_TRANSITIONAL_INLINE = ['TT', 'I', 'B', 'U', 'S', 'STRIKE', 'BIG', 'SMALL', 'EM', 'STRONG', 'DFN', 'CODE', 'SAMP', 'KBD', 'VAR', 'CITE', 'ABBR', 'ACRONYM', 'A', 'IMG', 'APPLET', 'OBJECT', 'FONT', 'BASEFONT', 'SCRIPT', 'MAP', 'Q', 'SUB', 'SUP', 'SPAN', 'BDO', 'IFRAME', 'INPUT', 'SELECT', 'TEXTAREA', 'LABEL', 'BUTTON']
HTML_FORBIDDEN_END = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR', 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
def Print(root, stream=sys.stdout, encoding='UTF-8'):
if not hasattr(root, "nodeType"):
return
nss = SeekNss(root)
visitor = PrintVisitor(stream, encoding, nsHints=nss)
PrintWalker(visitor, root).run()
return
def PrettyPrint(root, stream=sys.stdout, encoding='UTF-8', indent=' ',
preserveElements=None):
if not hasattr(root, "nodeType"):
return
nss_hints = SeekNss(root)
preserveElements = preserveElements or []
owner_doc = root.ownerDocument or root
if hasattr(owner_doc, 'getElementsByName'):
#We don't want to insert any whitespace into HTML inline elements
preserveElements = preserveElements + HTML_4_TRANSITIONAL_INLINE
visitor = PrintVisitor(stream, encoding, indent,
preserveElements, nss_hints)
PrintWalker(visitor, root).run()
stream.write('\n')
return
def GetAllNs(node):
#The xml namespace is implicit
nss = {'xml': XML_NAMESPACE}
if node.nodeType == Node.ATTRIBUTE_NODE and node.ownerElement:
return GetAllNs(node.ownerElement)
if node.nodeType == Node.ELEMENT_NODE:
if node.namespaceURI:
nss[node.prefix] = node.namespaceURI
for attr in node.attributes.values():
if attr.namespaceURI == XMLNS_NAMESPACE:
if attr.localName == 'xmlns':
nss[None] = attr.value
else:
nss[attr.localName] = attr.value
elif attr.namespaceURI:
nss[attr.prefix] = attr.namespaceURI
if node.parentNode:
#Inner NS/Prefix mappings take precedence over outer ones
parent_nss = GetAllNs(node.parentNode)
parent_nss.update(nss)
nss = parent_nss
return nss
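# Illustrative example (hypothetical document): for
#   <a xmlns:p="urn:outer"><b xmlns:p="urn:inner"/></a>
# GetAllNs on the <b> element returns
#   {'xml': XML_NAMESPACE, 'p': 'urn:inner'}
# because the parent's mappings are gathered first and then updated with
# the child's, so inner declarations for the same prefix win.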
def SeekNss(node, nss=None):
'''traverses the tree to seek an approximate set of defined namespaces'''
nss = nss or {}
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
if child.namespaceURI:
nss[child.prefix] = child.namespaceURI
for attr in child.attributes.values():
if attr.namespaceURI == XMLNS_NAMESPACE:
if attr.localName == 'xmlns':
nss[None] = attr.value
else:
nss[attr.localName] = attr.value
elif attr.namespaceURI:
nss[attr.prefix] = attr.namespaceURI
SeekNss(child, nss)
return nss
class PrintVisitor:
def __init__(self, stream, encoding, indent='', plainElements=None,
nsHints=None, isXhtml=0, force8bit=0):
self.stream = stream
self.encoding = encoding
# Namespaces
self._namespaces = [{}]
self._nsHints = nsHints or {}
# PrettyPrint
self._indent = indent
self._depth = 0
self._inText = 0
self._plainElements = plainElements or []
# HTML support
self._html = None
self._isXhtml = isXhtml
self.force8bit = force8bit
return
def _write(self, text):
if self.force8bit:
obj = strobj_to_utf8str(text, self.encoding)
else:
obj = utf8_to_code(text, self.encoding)
self.stream.write(obj)
return
def _tryIndent(self):
if not self._inText and self._indent:
self._write('\n' + self._indent * self._depth)
return
def visit(self, node):
if self._html is None:
# Set HTMLDocument flag here for speed
self._html = hasattr(node.ownerDocument, 'getElementsByName')
if node.nodeType == Node.ELEMENT_NODE:
return self.visitElement(node)
elif node.nodeType == Node.ATTRIBUTE_NODE:
return self.visitAttr(node)
elif node.nodeType == Node.TEXT_NODE:
return self.visitText(node)
elif node.nodeType == Node.CDATA_SECTION_NODE:
return self.visitCDATASection(node)
elif node.nodeType == Node.ENTITY_REFERENCE_NODE:
return self.visitEntityReference(node)
elif node.nodeType == Node.ENTITY_NODE:
return self.visitEntity(node)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
return self.visitProcessingInstruction(node)
elif node.nodeType == Node.COMMENT_NODE:
return self.visitComment(node)
elif node.nodeType == Node.DOCUMENT_NODE:
return self.visitDocument(node)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
return self.visitDocumentType(node)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
return self.visitDocumentFragment(node)
elif node.nodeType == Node.NOTATION_NODE:
return self.visitNotation(node)
# It has a node type, but we don't know how to handle it
raise Exception("Unknown node type: %s" % repr(node))
def visitNodeList(self, node, exclude=None):
for curr in node:
if curr is not exclude:
self.visit(curr)
return
def visitNamedNodeMap(self, node):
for item in node.values():
self.visit(item)
return
def visitAttr(self, node):
if node.namespaceURI == XMLNS_NAMESPACE:
# Skip namespace declarations
return
self._write(' ' + node.name)
value = node.value
if value or not self._html:
text = TranslateCdata(value, self.encoding)
text, delimiter = TranslateCdataAttr(text)
self.stream.write("=%s%s%s" % (delimiter, text, delimiter))
return
def visitProlog(self):
self._write("<?xml version='1.0' encoding='%s'?>" % (
self.encoding or 'utf-8'
))
self._inText = 0
return
def visitDocument(self, node):
not self._html and self.visitProlog()
node.doctype and self.visitDocumentType(node.doctype)
self.visitNodeList(node.childNodes, exclude=node.doctype)
return
def visitDocumentFragment(self, node):
self.visitNodeList(node.childNodes)
return
def visitElement(self, node):
self._namespaces.append(self._namespaces[-1].copy())
inline = node.tagName in self._plainElements
not inline and self._tryIndent()
self._write('<%s' % node.tagName)
if self._isXhtml or not self._html:
namespaces = ''
if self._isXhtml:
nss = {'xml': XML_NAMESPACE, None: XHTML_NAMESPACE}
else:
nss = GetAllNs(node)
if self._nsHints:
self._nsHints.update(nss)
nss = self._nsHints
self._nsHints = {}
del nss['xml']
for prefix in nss.keys():
if prefix not in self._namespaces[-1] or self._namespaces[-1][prefix] != nss[prefix]:
nsuri, delimiter = TranslateCdataAttr(nss[prefix])
if prefix:
xmlns = " xmlns:%s=%s%s%s" % (prefix, delimiter, nsuri, delimiter)
else:
xmlns = " xmlns=%s%s%s" % (delimiter, nsuri, delimiter)
namespaces = namespaces + xmlns
self._namespaces[-1][prefix] = nss[prefix]
self._write(namespaces)
for attr in node.attributes.values():
self.visitAttr(attr)
if len(node.childNodes):
self._write('>')
self._depth = self._depth + 1
self.visitNodeList(node.childNodes)
self._depth = self._depth - 1
if not self._html or (node.tagName not in HTML_FORBIDDEN_END):
not (self._inText and inline) and self._tryIndent()
self._write('</%s>' % node.tagName)
elif not self._html:
self._write('/>')
elif node.tagName not in HTML_FORBIDDEN_END:
self._write('></%s>' % node.tagName)
else:
self._write('>')
del self._namespaces[-1]
self._inText = 0
return
def visitText(self, node):
text = node.data
if self._indent:
text = string.strip(text) and text
if text:
if self._html:
text = TranslateHtmlCdata(text, self.encoding)
else:
text = TranslateCdata(text, self.encoding)
self.stream.write(text)
self._inText = 1
return
def visitDocumentType(self, doctype):
if not doctype.systemId and not doctype.publicId:
return
self._tryIndent()
self._write('<!DOCTYPE %s' % doctype.name)
if doctype.systemId and '"' in doctype.systemId:
system = "'%s'" % doctype.systemId
else:
system = '"%s"' % doctype.systemId
if doctype.publicId and '"' in doctype.publicId:
# We should probably throw an error
# Valid characters: <space> | <newline> | <linefeed> |
# [a-zA-Z0-9] | [-'()+,./:=?;!*#@$_%]
public = "'%s'" % doctype.publicId
else:
public = '"%s"' % doctype.publicId
if doctype.publicId and doctype.systemId:
self._write(' PUBLIC %s %s' % (public, system))
elif doctype.systemId:
self._write(' SYSTEM %s' % system)
if doctype.entities or doctype.notations:
self._write(' [')
self._depth = self._depth + 1
self.visitNamedNodeMap(doctype.entities)
self.visitNamedNodeMap(doctype.notations)
self._depth = self._depth - 1
self._tryIndent()
self._write(']>')
else:
self._write('>')
self._inText = 0
return
def visitEntity(self, node):
"""Visited from a NamedNodeMap in DocumentType"""
self._tryIndent()
self._write('<!ENTITY %s' % (node.nodeName))
node.publicId and self._write(' PUBLIC %s' % node.publicId)
node.systemId and self._write(' SYSTEM %s' % node.systemId)
node.notationName and self._write(' NDATA %s' % node.notationName)
self._write('>')
return
def visitNotation(self, node):
"""Visited from a NamedNodeMap in DocumentType"""
self._tryIndent()
self._write('<!NOTATION %s' % node.nodeName)
node.publicId and self._write(' PUBLIC %s' % node.publicId)
node.systemId and self._write(' SYSTEM %s' % node.systemId)
self._write('>')
return
def visitCDATASection(self, node):
self._tryIndent()
self._write('<![CDATA[%s]]>' % (node.data))
self._inText = 0
return
def visitComment(self, node):
self._tryIndent()
self._write('<!--%s-->' % (node.data))
self._inText = 0
return
def visitEntityReference(self, node):
self._write('&%s;' % node.nodeName)
self._inText = 1
return
def visitProcessingInstruction(self, node):
self._tryIndent()
self._write('<?%s %s?>' % (node.target, node.data))
self._inText = 0
return
class PrintWalker:
def __init__(self, visitor, startNode):
self.visitor = visitor
self.start_node = startNode
return
def step(self):
"""There is really no step to printing. It prints the whole thing"""
self.visitor.visit(self.start_node)
return
def run(self):
return self.step()
ILLEGAL_LOW_CHARS = '[\x01-\x08\x0B-\x0C\x0E-\x1F]'
SURROGATE_BLOCK = '[\xF0-\xF7][\x80-\xBF][\x80-\xBF][\x80-\xBF]'
ILLEGAL_HIGH_CHARS = '\xEF\xBF[\xBE\xBF]'
#Note: Possibly fuzzy on this, but it looks as if characters from the surrogate block are allowed if in scalar form, which is encoded in UTF-8 the same way as in surrogate block form
XML_ILLEGAL_CHAR_PATTERN = re.compile('%s|%s' % (ILLEGAL_LOW_CHARS, ILLEGAL_HIGH_CHARS))
g_utf8TwoBytePattern = re.compile('([\xC0-\xC3])([\x80-\xBF])')
g_cdataCharPattern = re.compile('[&<]|]]>')
g_charToEntity = {
'&': '&amp;',
'<': '&lt;',
']]>': ']]&gt;',
}
# Slightly modified to not use types.Unicode
import codecs
def utf8_to_code(text, encoding):
encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
if type(text) is not unicode:
text = unicode(text, "utf-8")
return encoder(text)[0] # result,size
def strobj_to_utf8str(text, encoding):
if string.upper(encoding) not in ["UTF-8", "ISO-8859-1", "LATIN-1"]:
raise ValueError("Invalid encoding: %s" % encoding)
encoder = codecs.lookup(encoding)[0] # encode,decode,reader,writer
if type(text) is not unicode:
text = unicode(text, "utf-8")
#FIXME
return str(encoder(text)[0])
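# Illustrative examples (assuming Python 2 semantics, as in the rest of this
# module; not part of the original source): utf8_to_code re-encodes UTF-8
# input into the target charset.
#   utf8_to_code('caf\xc3\xa9', 'iso-8859-1')  ->  'caf\xe9'
#   utf8_to_code(u'caf\xe9', 'utf-8')          ->  'caf\xc3\xa9'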
def TranslateCdataAttr(characters):
'''Handles normalization and some intelligence about quoting'''
if not characters:
return '', "'"
if "'" in characters:
delimiter = '"'
new_chars = re.sub('"', '&quot;', characters)
else:
delimiter = "'"
new_chars = re.sub("'", '&apos;', characters)
#FIXME: There's more to normalization
#Convert attribute new-lines to character entity
# characters is possibly shorter than new_chars (no entities)
if "\n" in characters:
new_chars = re.sub('\n', '&#10;', new_chars)
return new_chars, delimiter
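# Illustrative examples of the quoting logic above (comments added for
# clarity; not part of the original module):
#   TranslateCdataAttr("it's")       -> ("it's", '"')       # switch to double quotes
#   TranslateCdataAttr('say "hi"')   -> ('say "hi"', "'")   # single quotes suffice
#   TranslateCdataAttr('a\nb')       -> ('a&#10;b', "'")    # newline -> char entity
#   TranslateCdataAttr('')           -> ('', "'")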
#Note: Unicode object only for now
def TranslateCdata(characters, encoding='UTF-8', prev_chars='', markupSafe=0,
charsetHandler=utf8_to_code):
"""
charsetHandler is a function that takes a string or unicode object as the
first argument, representing the string to be processed, and an encoding
specifier as the second argument. It must return a string or unicode
object
"""
if not characters:
return ''
if not markupSafe:
if g_cdataCharPattern.search(characters):
new_string = g_cdataCharPattern.subn(
lambda m, d=g_charToEntity: d[m.group()],
characters)[0]
else:
new_string = characters
if prev_chars[-2:] == ']]' and characters[0] == '>':
new_string = '&gt;' + new_string[1:]
else:
new_string = characters
#Note: use decimal char entity rep because some browsers are broken
#FIXME: This will bomb for high characters. Should, for instance, detect
#The UTF-8 for 0xFFFE and put out &#xFFFE;
if XML_ILLEGAL_CHAR_PATTERN.search(new_string):
new_string = XML_ILLEGAL_CHAR_PATTERN.subn(
lambda m: '&#%i;' % ord(m.group()),
new_string)[0]
new_string = charsetHandler(new_string, encoding)
return new_string
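# Illustrative examples (not part of the original module): markup-significant
# characters are replaced by entities before charset handling.
#   TranslateCdata('a < b & c')   ->  'a &lt; b &amp; c'
#   TranslateCdata('x]]>y')       ->  'x]]&gt;y'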
def TranslateHtmlCdata(characters, encoding='UTF-8', prev_chars=''):
#Translate numerical char entity references with HTML entity equivalents
new_string, _ignore_num_subst = re.subn(
g_cdataCharPattern,
lambda m, d=g_charToEntity: d[m.group()],
characters
)
if prev_chars[-2:] == ']]' and new_string[0] == '>':
new_string = '&gt;' + new_string[1:]
new_string = UseHtmlCharEntities(new_string)
try:
new_string = utf8_to_code(new_string, encoding)
except:
#FIXME: This is a work-around, contributed by Mike Brown, that
#Deals with escaping output, until we have XML/HTML aware codecs
tmp_new_string = ""
for c in new_string:
try:
new_c = utf8_to_code(c, encoding)
except:
new_c = '&#%i;' % ord(c)
tmp_new_string = tmp_new_string + new_c
new_string = tmp_new_string
#new_string, num_subst = re.subn(g_xmlIllegalCharPattern, lambda m: '&#%i;'%ord(m.group()), new_string)
#Note: use decimal char entity rep because some browsers are broken
return new_string
HTML_CHARACTER_ENTITIES = {
# Sect 24.2 -- ISO 8859-1
160: 'nbsp',
161: 'iexcl',
162: 'cent',
163: 'pound',
164: 'curren',
165: 'yen',
166: 'brvbar',
167: 'sect',
168: 'uml',
169: 'copy',
170: 'ordf',
171: 'laquo',
172: 'not',
173: 'shy',
174: 'reg',
175: 'macr',
176: 'deg',
177: 'plusmn',
178: 'sup2',
179: 'sup3',
180: 'acute',
181: 'micro',
182: 'para',
183: 'middot',
184: 'cedil',
185: 'sup1',
186: 'ordm',
187: 'raquo',
188: 'frac14',
189: 'frac12',
190: 'frac34',
191: 'iquest',
192: 'Agrave',
193: 'Aacute',
194: 'Acirc',
195: 'Atilde',
196: 'Auml',
197: 'Aring',
198: 'AElig',
199: 'Ccedil',
200: 'Egrave',
201: 'Eacute',
202: 'Ecirc',
203: 'Euml',
204: 'Igrave',
205: 'Iacute',
206: 'Icirc',
207: 'Iuml',
208: 'ETH',
209: 'Ntilde',
210: 'Ograve',
211: 'Oacute',
212: 'Ocirc',
213: 'Otilde',
214: 'Ouml',
215: 'times',
216: 'Oslash',
217: 'Ugrave',
218: 'Uacute',
219: 'Ucirc',
220: 'Uuml',
221: 'Yacute',
222: 'THORN',
223: 'szlig',
224: 'agrave',
225: 'aacute',
226: 'acirc',
227: 'atilde',
228: 'auml',
229: 'aring',
230: 'aelig',
231: 'ccedil',
232: 'egrave',
233: 'eacute',
234: 'ecirc',
235: 'euml',
236: 'igrave',
237: 'iacute',
238: 'icirc',
239: 'iuml',
240: 'eth',
241: 'ntilde',
242: 'ograve',
243: 'oacute',
244: 'ocirc',
245: 'otilde',
246: 'ouml',
247: 'divide',
248: 'oslash',
249: 'ugrave',
250: 'uacute',
251: 'ucirc',
252: 'uuml',
253: 'yacute',
254: 'thorn',
255: 'yuml',
# Sect 24.3 -- Symbols, Mathematical Symbols, and Greek Letters
# Latin Extended-B
402: 'fnof',
# Greek
913: 'Alpha',
914: 'Beta',
915: 'Gamma',
916: 'Delta',
917: 'Epsilon',
918: 'Zeta',
919: 'Eta',
920: 'Theta',
921: 'Iota',
922: 'Kappa',
923: 'Lambda',
924: 'Mu',
925: 'Nu',
926: 'Xi',
927: 'Omicron',
928: 'Pi',
929: 'Rho',
931: 'Sigma',
932: 'Tau',
933: 'Upsilon',
934: 'Phi',
935: 'Chi',
936: 'Psi',
937: 'Omega',
945: 'alpha',
946: 'beta',
947: 'gamma',
948: 'delta',
949: 'epsilon',
950: 'zeta',
951: 'eta',
952: 'theta',
953: 'iota',
954: 'kappa',
955: 'lambda',
956: 'mu',
957: 'nu',
958: 'xi',
959: 'omicron',
960: 'pi',
961: 'rho',
962: 'sigmaf',
963: 'sigma',
964: 'tau',
965: 'upsilon',
966: 'phi',
967: 'chi',
968: 'psi',
969: 'omega',
977: 'thetasym',
978: 'upsih',
982: 'piv',
# General Punctuation
8226: 'bull', # bullet
8230: 'hellip', # horizontal ellipsis
8242: 'prime', # prime (minutes/feet)
8243: 'Prime', # double prime (seconds/inches)
8254: 'oline', # overline (spacing overscore)
8260: 'frasl', # fraction slash
# Letterlike Symbols
8472: 'weierp', # script capital P (power set/Weierstrass p)
8465: 'image', # blackletter capital I (imaginary part)
8476: 'real', # blackletter capital R (real part)
8482: 'trade', # trademark
8501: 'alefsym', # alef symbol (first transfinite cardinal)
# Arrows
8592: 'larr', # leftwards arrow
8593: 'uarr', # upwards arrow
8594: 'rarr', # rightwards arrow
8595: 'darr', # downwards arrow
8596: 'harr', # left right arrow
8629: 'crarr', # downwards arrow with corner leftwards (carriage return)
8656: 'lArr', # leftwards double arrow
8657: 'uArr', # upwards double arrow
8658: 'rArr', # rightwards double arrow
8659: 'dArr', # downwards double arrow
8660: 'hArr', # left right double arrow
# Mathematical Operators
8704: 'forall', # for all
8706: 'part', # partial differential
8707: 'exist', # there exists
8709: 'empty', # empty set, null set, diameter
8711: 'nabla', # nabla, backward difference
8712: 'isin', # element of
8713: 'notin', # not an element of
8715: 'ni', # contains as member
8719: 'prod', # n-ary product, product sign
8721: 'sum', # n-ary summation
8722: 'minus', # minus sign
8727: 'lowast', # asterisk operator
8730: 'radic', # square root, radical sign
8733: 'prop', # proportional to
8734: 'infin', # infinity
8736: 'ang', # angle
8743: 'and', # logical and, wedge
8744: 'or', # logical or, vee
8745: 'cap', # intersection, cap
8746: 'cup', # union, cup
8747: 'int', # integral
8756: 'there4', # therefore
8764: 'sim', # tilde operator, varies with, similar to
8773: 'cong', # approximately equal to
8776: 'asymp', # almost equal to, asymptotic to
8800: 'ne', # not equal to
8801: 'equiv', # identical to
8804: 'le', # less-than or equal to
8805: 'ge', # greater-than or equal to
8834: 'sub', # subset of
8835: 'sup', # superset of
8836: 'nsub', # not subset of
8838: 'sube', # subset of or equal to
8839: 'supe', # superset of or equal to
8853: 'oplus', # circled plus, direct sum
8855: 'otimes', # circled times, vector product
8869: 'perp', # up tack, orthogonal to, perpendicular
8901: 'sdot', # dot operator
8968: 'lceil', # left ceiling, apl upstile
8969: 'rceil', # right ceiling
8970: 'lfloor', # left floor, apl downstile
8971: 'rfloor', # right floor
9001: 'lang', # left-pointing angle bracket, bra
9002: 'rang', # right-pointing angle bracket, ket
9674: 'loz', # lozenge
# Miscellaneous Symbols
9824: 'spades',
9827: 'clubs',
9829: 'hearts',
9830: 'diams',
# Sect 24.4 -- Markup Significant and Internationalization
# Latin Extended-A
338: 'OElig', # capital ligature OE
339: 'oelig', # small ligature oe
352: 'Scaron', # capital S with caron
353: 'scaron', # small s with caron
376: 'Yuml', # capital Y with diaeresis
# Spacing Modifier Letters
710: 'circ', # circumflex accent
732: 'tilde', # small tilde
# General Punctuation
8194: 'ensp', # en space
8195: 'emsp', # em space
8201: 'thinsp', # thin space
8204: 'zwnj', # zero-width non-joiner
8205: 'zwj', # zero-width joiner
8206: 'lrm', # left-to-right mark
8207: 'rlm', # right-to-left mark
8211: 'ndash', # en dash
8212: 'mdash', # em dash
8216: 'lsquo', # left single quotation mark
8217: 'rsquo', # right single quotation mark
8218: 'sbquo', # single low-9 quotation mark
8220: 'ldquo', # left double quotation mark
8221: 'rdquo', # right double quotation mark
8222: 'bdquo', # double low-9 quotation mark
8224: 'dagger', # dagger
8225: 'Dagger', # double dagger
8240: 'permil', # per mille sign
8249: 'lsaquo', # single left-pointing angle quotation mark
8250: 'rsaquo', # single right-pointing angle quotation mark
8364: 'euro', # euro sign
}
g_htmlUniCharEntityPattern = re.compile('[\xa0-\xff]')
def ConvertChar(m):
return '&' + HTML_CHARACTER_ENTITIES[ord(m.group())] + ';'
def UseHtmlCharEntities(text):
if type(text) is not unicode:
text = unicode(text, "utf-8")
new_text, _ignore_num_subst = re.subn(g_htmlUniCharEntityPattern, ConvertChar, text)
return new_text
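# Illustrative examples (not part of the original module): Latin-1 range
# characters are rewritten as named HTML entities.
#   UseHtmlCharEntities('caf\xc3\xa9')  ->  u'caf&eacute;'   # 0xE9 -> eacute
#   UseHtmlCharEntities(u'\xa0')        ->  u'&nbsp;'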
|
trevor/calendarserver
|
txdav/xml/xmlext.py
|
Python
|
apache-2.0
| 24,968
|
[
"VisIt"
] |
5ed75de64c3e37e0f28a5ea3124817bcad34b5763a45454e606ea326bd41bc5d
|
# Simple Python Library for accessing WS2801 LED stripes
# Copyright (C) 2013 Philipp Tiefenbacher <wizards23@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For more information about this project please visit:
# http://www.hackerspaceshop.com/ledstrips/raspberrypi-ws2801.html
import spidev
class LedStrip_WS2801(object):
"""Access to SPI with python spidev library."""
# spiDevice has format [
def __init__(self, nLeds, nBuffers=1):
self.spi = spidev.SpiDev() # create spi object
self.spi.open(0, 1)
self.spi.max_speed_hz = 1000000
self.nLeds = nLeds
self.nBuffers = nBuffers
self.buffers = []
for i in range(0, nBuffers):
ba = []
for l in range(0, nLeds):
ba.extend([0, 0, 0])
self.buffers.append(ba)
def close(self):
if self.spi is not None:
self.spi.close()
self.spi = None
def update(self, bufferNr=0):
self.spi.writebytes(self.buffers[bufferNr])
def setAll(self, color, bufferNr=0):
for i in range(0, self.nLeds):
self.setPixel(i, color, bufferNr)
def setPixel(self, index, color, bufferNr=0):
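# Note: the channels are stored as (R, B, G), i.e. green and blue are
# swapped relative to the input (r, g, b) tuple, presumably to match
# the strip's channel wiring.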
self.buffers[bufferNr][index * 3:index * 3 + 3] = (color[0], color[2], color[1])
class LedStrip_WS2801_FileBased(LedStrip_WS2801):
"""Filebased acces to SPI."""
def __init__(self, nLeds, spiDevice, nBuffers=1):
self.spi = open(spiDevice, "w")
self.nLeds = nLeds
self.nBuffers = nBuffers
self.buffers = []
for i in range(0, nBuffers):
ba = bytearray()
for l in range(0, nLeds):
ba.extend([0, 0, 0])
self.buffers.append(ba)
def update(self, bufferNr=0):
self.spi.write(self.buffers[bufferNr])
self.spi.flush()
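# --- Minimal usage sketch (illustrative, not part of the library) ---
# Assumes a 32-LED WS2801 strip on SPI bus 0, device 1, as opened by
# LedStrip_WS2801.__init__ above.
#
#   strip = LedStrip_WS2801(32)
#   strip.setAll((255, 0, 0))   # every pixel full red
#   strip.update()              # push buffer 0 out over SPI
#   strip.close()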
|
jsphpl/ws2801-matrix
|
LedStrip_WS2801.py
|
Python
|
mit
| 2,304
|
[
"VisIt"
] |
967cdee5fbf1428ea84a707a05795de24602a651cfdf84c0fb367d3397989b3b
|
"""
Objects for manipulation with VMD molecules.
"""
import logging
import os.path
from Molecule import Molecule as _Molecule
from VMD import molecule as _molecule, molrep as _molrep
__all__ = ['Frames', 'Molecule', 'FORMAT_DCD', 'FORMAT_PARM7', 'FORMAT_PDB', 'FORMAT_PSF', 'FORMATS', 'MOLECULES']
LOGGER = logging.getLogger(__name__)
# File formats
FORMAT_DCD = 'dcd'
FORMAT_PARM7 = 'parm7'
FORMAT_PDB = 'pdb'
FORMAT_PSF = 'psf'
# Dictionary to translate file extensions to file formats
FORMATS = {
'dcd': FORMAT_DCD,
'pdb': FORMAT_PDB,
'psf': FORMAT_PSF,
'prmtop': FORMAT_PARM7,
}
def guess_file_format(filename):
"""
Returns the format of the file, guessed from its extension.
If the format can't be detected, returns None.
"""
dummy, ext = os.path.splitext(filename)
if not ext or ext == '.':
return None
ext = ext[1:]
# If the extension is not found in dictionary, return it as is
return FORMATS.get(ext, ext)
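# Illustrative examples (based on the FORMATS table above; not part of the
# original module):
#   guess_file_format('traj.dcd')    ->  'dcd'
#   guess_file_format('top.prmtop')  ->  'parm7'
#   guess_file_format('data.xyz')    ->  'xyz'   # unknown extensions pass through
#   guess_file_format('noext')       ->  None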
class Frames(object):
"""
Wrapper for molecules' frames.
"""
def __init__(self, molecule):
"""
@param molecule: Respective molecule
@type molecule: Molecule
"""
# Use molecule instance instead of molid for possible callbacks
assert isinstance(molecule, Molecule)
self.molecule = molecule
def __len__(self):
return _molecule.numframes(self.molecule.molid)
def __delitem__(self, key):
# XXX: For some reason, 'skip' in the 'delframe' function means which frames are left when deleting
# That is not consistent with python slicing, so we have to avoid that argument
if isinstance(key, slice):
start, stop, step = key.indices(len(self))
# We will delete one by one, so we have to do that in reversed order
frames = reversed(xrange(start, stop, step))
elif isinstance(key, int):
if key < 0:
frames = [len(self) + key]
else:
frames = [key]
else:
raise TypeError("%s indices must be integers, not %s" % (type(self), type(key)))
for frame in frames:
LOGGER.debug("Deleting frame %d", frame)
_molecule.delframe(self.molecule.molid, beg=frame, end=frame)
def __iter__(self):
# Return the iterator over frames
return iter(xrange(len(self)))
def copy(self, frame=None):
"""
Copies frame and moves the molecule to the new frame.
@param frame: Frame to be copied. If not defined or `None`, active frame is copied.
@type frame: Non-negative integer or `None`
"""
if frame is None:
frame = self.molecule.frame
else:
assert frame >= 0
_molecule.dupframe(self.molecule.molid, frame)
class RepresentationManager(object):
"""
Manager of molecule representations.
"""
def __init__(self, molecule):
"""
@param molecule: Respective molecule
@type molecule: Molecule
"""
# Use molecule instance instead of molid for possible callbacks
assert isinstance(molecule, Molecule)
self.molecule = molecule
def __len__(self):
return _molrep.num(self.molecule.molid)
def __iter__(self):
from pyvmd.representations import Representation
for i in xrange(len(self)):
yield Representation(_molrep.get_repname(self.molecule.molid, i), self.molecule)
def __getitem__(self, key):
from pyvmd.representations import Representation
length = len(self)
if isinstance(key, slice):
# Use recursion for individual representations
return [self[i] for i in xrange(*key.indices(length))]
elif isinstance(key, int):
if key < 0:
index = length + key
else:
index = key
if not 0 <= index < length:
raise IndexError("Index out of range")
return Representation(_molrep.get_repname(self.molecule.molid, index), self.molecule)
elif isinstance(key, basestring):
try:
return Representation(key, self.molecule)
except ValueError:
raise KeyError(key)
else:
raise TypeError("%s indices must be integers, not %s" % (type(self), type(key)))
class Molecule(object):
"""
Molecule representation.
This class is a proxy for molecule loaded into VMD.
"""
def __init__(self, molid):
"""
Creates a new molecule.
@param molid: ID of existing molecule
@type molid: Non-negative integer
"""
assert molid >= 0
if not _molecule.exists(molid):
raise ValueError("Molecule %d does not exist." % molid)
self.molid = molid
self._molecule = None
def __repr__(self):
return "<%s: %s(%d)>" % (type(self).__name__, self.name, self.molid)
def __eq__(self, other):
return type(self) == type(other) and self.molid == other.molid
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def create(cls, name=None):
"""
Creates new molecule.
@param name: Name of the molecule.
"""
molid = _molecule.new(name or 'molecule')
return cls(molid)
def delete(self):
"""
Deletes the molecule.
"""
_molecule.delete(self.molid)
def load(self, filename, filetype=None, start=0, stop=-1, step=1, wait=True, volsets=None):
"""
Loads data from file into the molecule.
@param filename: Name of file to be loaded
@type filename: String
@param filetype: Format of file. If not present or `None` it is guessed.
@type filetype: One of `FORMAT_` constants, string or `None`
@param start: First frame to be loaded. Default is first frame in the file.
@type start: Non-negative integer
@param stop: Last frame to be loaded. Default (-1) is last frame in the file.
@type stop: Non-negative integer or -1
@param step: Load every step'th frame. Default is every frame.
@type step: Positive integer
@param wait: Whether to wait until the file is completely loaded.
@type wait: Boolean
@param volsets: Volumetric datasets to load, passed through to VMD's `read`.
"""
assert start >= 0
assert stop >= -1
assert step > 0
if filetype is None:
filetype = guess_file_format(filename)
if filetype is None:
raise ValueError("Cannot detect filetype for '%s'" % filename)
waitfor = wait and -1 or 0
volsets = volsets or []
_molecule.read(self.molid, filetype, filename, beg=start, end=stop, skip=step, waitfor=waitfor,
volsets=volsets)
@property
def molecule(self):
"""
Returns respective VMD.Molecule instance.
"""
return _Molecule(id=self.molid)
def _get_frame(self):
return _molecule.get_frame(self.molid)
def _set_frame(self, frame):
"""
Sets the active frame
@type frame: Non-negative integer
"""
assert frame >= 0
_molecule.set_frame(self.molid, frame)
frame = property(_get_frame, _set_frame, doc="Molecule's frame")
@property
def frames(self):
"""
Returns frames descriptor.
"""
return Frames(self)
def _get_name(self):
return _molecule.name(self.molid)
def _set_name(self, name):
_molecule.rename(self.molid, name)
name = property(_get_name, _set_name, doc="Molecule's name")
def _get_visible(self):
return _molecule.get_visible(self.molid)
def _set_visible(self, value):
"""
Sets molecule visibility
@type value: Boolean
"""
_molecule.set_visible(self.molid, value)
visible = property(_get_visible, _set_visible, doc="Visibility")
@property
def representations(self):
"""
Returns molecule representations manager.
"""
return RepresentationManager(self)
class MoleculeManager(object):
"""
Manager of all molecules.
"""
def __init__(self):
self._names = {}
# Fill the cache
self._update()
def _update(self):
# Update the name cache
cache = {}
for molid in _molecule.listall():
name = _molecule.name(molid)
cache.setdefault(name, molid)
self._names = cache
def __len__(self):
return _molecule.num()
def __getitem__(self, key):
"""
Returns the molecule specified by name or molid.
If the name is not unique, it is unspecified which molecule is returned.
@type key: int or str
@rtype: Molecule
"""
if isinstance(key, int):
assert key >= 0
if _molecule.exists(key):
return Molecule(key)
else:
raise ValueError("Molecule %d doesn't exist." % key)
elif isinstance(key, basestring):
# First check the cached names
if key in self._names:
molid = self._names[key]
if _molecule.exists(molid):
return Molecule(molid)
else:
# The record in cache is obsolete
del self._names[key]
# No luck so far, update the cache
self._update()
if key in self._names:
# We found it after update. Do not check the existence again, we just updated the cache.
return Molecule(self._names[key])
else:
raise ValueError("Molecule '%s' doesn't exist." % key)
else:
raise TypeError("%s indices must be integers or strings, not %s" % (type(self), type(key)))
def __delitem__(self, key):
"""
Deletes the molecule specified by name or molid.
If the name is not unique, it is unspecified which molecule is deleted.
@type key: int or str
"""
# Use __getitem__ to find out which molecule is to be deleted.
molecule = self.__getitem__(key)
# Clean the cache
self._names.pop(molecule.name)
# Delete molecule
_molecule.delete(molecule.molid)
def __iter__(self):
for molid in _molecule.listall():
yield Molecule(molid)
def __contains__(self, molecule):
return _molecule.exists(molecule.molid)
def _get_top(self):
molid = _molecule.get_top()
# If there are no molecules, VMD returns -1 as molid.
if molid < 0:
raise ValueError("There are no molecules.")
return Molecule(molid)
def _set_top(self, molecule):
_molecule.set_top(molecule.molid)
top = property(_get_top, _set_top, doc="Top molecule")
MOLECULES = MoleculeManager()
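# --- Minimal usage sketch (illustrative; the file names below are hypothetical) ---
#   mol = Molecule.create('system')
#   mol.load('system.psf')               # topology; format guessed from extension
#   mol.load('trajectory.dcd', step=10)  # load every 10th frame
#   del mol.frames[0]                    # drop the first frame
#   MOLECULES.top = mol                  # make it the top molecule in VMD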
|
ziima/pyvmd
|
pyvmd/molecules.py
|
Python
|
gpl-3.0
| 10,984
|
[
"VMD"
] |
e238c377aea05e385358c52462c4aae9bf5552323337e564e9cc8637a97143ba
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Linear Gaussian State Space Model."""
import collections
import functools
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import mvn_tril
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.experimental import parallel_filter
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.ops import parallel_for # pylint: disable=g-direct-tensorflow-import
tfl = tf.linalg
FilterResults = collections.namedtuple(
'FilterResults',
['log_likelihoods',
'filtered_means',
'filtered_covs',
'predicted_means',
'predicted_covs',
'observation_means',
'observation_covs'])
def _safe_concat(values):
"""Concat along axis=0 that works even when some arguments have size 0."""
initial_value_shape = ps.shape(values[0])
reference_shape = ps.concat([[-1], initial_value_shape[1:]], axis=0)
trivial_shape = ps.concat([[1], initial_value_shape[1:]], axis=0)
full_values = []
for x in values:
try:
full_values.append(ps.reshape(x, reference_shape))
except (TypeError, ValueError):
# JAX/numpy don't like `-1`'s in size-zero shapes.
full_values.append(ps.reshape(x, trivial_shape))
return ps.concat(full_values, axis=0)
def _check_equal_shape(name,
static_shape,
dynamic_shape,
static_target_shape,
dynamic_target_shape=None,
validate_args=True):
"""Check that source and target shape match, statically if possible."""
static_target_shape = tf.TensorShape(static_target_shape)
if tensorshape_util.is_fully_defined(
static_shape) and tensorshape_util.is_fully_defined(static_target_shape):
if static_shape != static_target_shape:
raise ValueError('{}: required shape {} but found {}'.
format(name, static_target_shape, static_shape))
return None
elif validate_args:
if dynamic_target_shape is None:
if tensorshape_util.is_fully_defined(static_target_shape):
dynamic_target_shape = tensorshape_util.as_list(static_target_shape)
else:
raise ValueError('{}: cannot infer target shape: no dynamic shape '
'specified and static shape {} is not fully defined'.
format(name, static_target_shape))
return assert_util.assert_equal(
dynamic_shape,
dynamic_target_shape,
message=('{}: required shape {}'.format(name, static_target_shape)))
def _augment_sample_shape(partial_batch_dist,
full_sample_and_batch_shape,
validate_args=False):
"""Augment a sample shape to broadcast batch dimensions.
Computes an augmented sample shape, so that any batch dimensions not
part of the distribution `partial_batch_dist` are treated as identical
distributions.
# partial_batch_dist.batch_shape = [ 7]
# full_sample_and_batch_shape = [3, 4, 7]
# => return an augmented sample shape of [3, 4] so that
# partial_batch_dist.sample(augmented_sample_shape) has combined
# sample and batch shape of [3, 4, 7].
Args:
partial_batch_dist: `tfd.Distribution` instance with batch shape a
prefix of `full_sample_and_batch_shape`.
full_sample_and_batch_shape: a Tensor or Tensor-like shape.
validate_args: if True, check for shape errors at runtime.
Returns:
augmented_sample_shape: sample shape such that
`partial_batch_dist.sample(augmented_sample_shape)` has combined
sample and batch shape of `full_sample_and_batch_shape`.
Raises:
ValueError: if `partial_batch_dist.batch_shape` has more dimensions than
`full_sample_and_batch_shape`.
NotImplementedError: if broadcasting would be required to make
`partial_batch_dist.batch_shape` into a prefix of
`full_sample_and_batch_shape` .
"""
full_ndims = ps.rank_from_shape(full_sample_and_batch_shape)
partial_batch_ndims = (
tensorshape_util.rank(partial_batch_dist.batch_shape) # pylint: disable=g-long-ternary
if tensorshape_util.rank(partial_batch_dist.batch_shape) is not None
else ps.rank_from_shape(partial_batch_dist.batch_shape_tensor()))
num_broadcast_dims = full_ndims - partial_batch_ndims
expected_partial_batch_shape = (
full_sample_and_batch_shape[num_broadcast_dims:])
expected_partial_batch_shape_static = tf.get_static_value(
full_sample_and_batch_shape[num_broadcast_dims:])
# Raise errors statically if possible.
num_broadcast_dims_static = tf.get_static_value(num_broadcast_dims)
if num_broadcast_dims_static is not None:
if num_broadcast_dims_static < 0:
raise ValueError('Cannot broadcast distribution {} batch shape to '
'target batch shape with fewer dimensions'
.format(partial_batch_dist))
if (expected_partial_batch_shape_static is not None and
tensorshape_util.is_fully_defined(partial_batch_dist.batch_shape)):
if (partial_batch_dist.batch_shape and
any(expected_partial_batch_shape_static != tensorshape_util.as_list(
partial_batch_dist.batch_shape))):
raise NotImplementedError('Broadcasting is not supported; '
'unexpected batch shape '
'(expected {}, saw {}).'.format(
expected_partial_batch_shape_static,
partial_batch_dist.batch_shape
))
runtime_assertions = []
if validate_args:
runtime_assertions.append(
assert_util.assert_greater_equal(
tf.convert_to_tensor(num_broadcast_dims, dtype=tf.int32),
tf.zeros((), dtype=tf.int32),
message=('Cannot broadcast distribution {} batch shape to '
'target batch shape with fewer dimensions.'.format(
partial_batch_dist))))
runtime_assertions.append(
assert_util.assert_equal(
expected_partial_batch_shape,
partial_batch_dist.batch_shape_tensor(),
message=('Broadcasting is not supported; '
'unexpected batch shape.'),
name='assert_batch_shape_same'))
with tf.control_dependencies(runtime_assertions):
return full_sample_and_batch_shape[:num_broadcast_dims]
class LinearGaussianStateSpaceModel(
distribution.AutoCompositeTensorDistribution):
"""Observation distribution from a linear Gaussian state space model.
A linear Gaussian state space model, sometimes called a Kalman filter, posits
a latent state vector `z[t]` of dimension `latent_size` that evolves
over time following linear Gaussian transitions,
```
z[t+1] = F * z[t] + N(b; Q) # latent state
x[t] = H * z[t] + N(c; R) # observed series
```
for transition matrix `F`, transition bias `b` and covariance matrix
`Q`, and observation matrix `H`, bias `c` and covariance matrix `R`. At each
timestep, the model generates an observable vector `x[t]`, a noisy projection
of the latent state. The transition and observation models may be fixed or
may vary between timesteps.
This Distribution represents the marginal distribution on
observations, `p(x)`. The marginal `log_prob` is implemented by
Kalman filtering [1], and `sample` by an efficient forward
recursion. Both operations require time linear in `T`, the total
number of timesteps.
#### Shapes
The event shape is `[num_timesteps, observation_size]`, where
`observation_size` is the dimension of each observation `x[t]`.
The observation and transition models must return consistent
shapes.
This implementation supports vectorized computation over a batch of
models. All of the parameters (prior distribution, transition and
observation operators and noise models) must have a consistent
batch shape.
#### Time-varying processes
Any of the model-defining parameters (prior distribution, transition
and observation operators and noise models) may be specified as a
callable taking an integer timestep `t` and returning a
time-dependent value. The dimensionality (`latent_size` and
`observation_size`) must be the same at all timesteps.
Importantly, the timestep is passed as a `Tensor`, not a Python
integer, so any conditional behavior must occur *inside* the
TensorFlow graph. For example, suppose we want to use a different
transition model on even days than odd days. It does *not* work to
write
```python
def transition_matrix(t):
if t % 2 == 0:
return even_day_matrix
else:
return odd_day_matrix
```
since the value of `t` is not fixed at graph-construction
time. Instead we need to write
```python
def transition_matrix(t):
return tf.cond(tf.equal(tf.mod(t, 2), 0),
lambda : even_day_matrix,
lambda : odd_day_matrix)
```
so that TensorFlow can switch between operators appropriately at
runtime.
#### Examples
Consider a simple tracking model, in which a two-dimensional latent state
represents the position of a vehicle, and at each timestep we
see a noisy observation of this position (e.g., a GPS reading). The
vehicle is assumed to move by a random walk with standard deviation
`step_std` at each step, and observation noise level `std`. We build
the marginal distribution over noisy observations as a state space model:
```python
tfd = tfp.distributions
ndims = 2
step_std = 1.0
noise_std = 5.0
model = tfd.LinearGaussianStateSpaceModel(
num_timesteps=100,
transition_matrix=tf.linalg.LinearOperatorIdentity(ndims),
transition_noise=tfd.MultivariateNormalDiag(
scale_diag=step_std**2 * tf.ones([ndims])),
observation_matrix=tf.linalg.LinearOperatorIdentity(ndims),
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=noise_std**2 * tf.ones([ndims])),
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([ndims])))
```
using the identity matrix for the transition and observation
operators. We can then use this model to generate samples,
compute marginal likelihood of observed sequences, and
perform posterior inference.
```python
x = model.sample(5) # Sample from the prior on sequences of observations.
lp = model.log_prob(x) # Marginal likelihood of a (batch of) observations.
# Compute the filtered posterior on latent states given observations,
# and extract the mean and covariance for the current (final) timestep.
_, filtered_means, filtered_covs, _, _, _, _ = model.forward_filter(x)
current_location_posterior = tfd.MultivariateNormalTriL(
loc=filtered_means[..., -1, :],
scale_tril=tf.linalg.cholesky(filtered_covs[..., -1, :, :]))
# Run a smoothing recursion to extract posterior marginals for locations
# at previous timesteps.
posterior_means, posterior_covs = model.posterior_marginals(x)
initial_location_posterior = tfd.MultivariateNormalTriL(
loc=posterior_means[..., 0, :],
scale_tril=tf.linalg.cholesky(posterior_covs[..., 0, :, :]))
```
* TODO(davmre): show example of fitting parameters.
"""
def __init__(self,
num_timesteps,
transition_matrix,
transition_noise,
observation_matrix,
observation_noise,
initial_state_prior,
initial_step=0,
mask=None,
experimental_parallelize=False, # TODO(b/169178065) Set to True.
validate_args=False,
allow_nan_stats=True,
name='LinearGaussianStateSpaceModel'):
"""Initialize a `LinearGaussianStateSpaceModel`.
Args:
num_timesteps: Integer `Tensor` total number of timesteps.
transition_matrix: A transition operator, represented by a Tensor or
LinearOperator of shape `[latent_size, latent_size]`, or by a
callable taking as argument a scalar integer Tensor `t` and
returning a Tensor or LinearOperator representing the transition
operator from latent state at time `t` to time `t + 1`.
transition_noise: An instance of
`tfd.MultivariateNormalLinearOperator` with event shape
`[latent_size]`, representing the mean and covariance of the
transition noise model, or a callable taking as argument a
scalar integer Tensor `t` and returning such a distribution
representing the noise in the transition from time `t` to time `t + 1`.
observation_matrix: An observation operator, represented by a Tensor
or LinearOperator of shape `[observation_size, latent_size]`,
or by a callable taking as argument a scalar integer Tensor
`t` and returning a timestep-specific Tensor or
LinearOperator.
observation_noise: An instance of
`tfd.MultivariateNormalLinearOperator` with event shape
`[observation_size]`, representing the mean and covariance of
the observation noise model, or a callable taking as argument
a scalar integer Tensor `t` and returning a timestep-specific
noise model.
initial_state_prior: An instance of `MultivariateNormalLinearOperator`
representing the prior distribution on latent states; must
have event shape `[latent_size]`.
initial_step: optional `int` specifying the time of the first
modeled timestep. This is added as an offset when passing
timesteps `t` to (optional) callables specifying
timestep-specific transition and observation models.
mask: Optional default missingness mask used for density and posterior
inference calculations (any method that takes a `mask` argument).
Bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on.
Default value: `None`.
experimental_parallelize: If `True`, use parallel message passing
algorithms from `tfp.experimental.parallel_filter` to perform operations
in `O(log num_timesteps)` sequential steps. The overall FLOP and memory
cost may be larger than for the sequential implementations, though
only by a constant factor.
Default value: `False`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
self._num_timesteps = ps.convert_to_shape_tensor(
num_timesteps, name='num_timesteps')
self._initial_state_prior = initial_state_prior
self._initial_step = ps.convert_to_shape_tensor(
initial_step, name='initial_step')
# We canonicalize these to LinearOperators below, so no need to do tensor
# conversions here. Either way, we set them as properties to track
# variables from tf.Modules, and to return them as properties.
self._observation_matrix = observation_matrix
self._transition_matrix = transition_matrix
self._transition_noise = transition_noise
self._observation_noise = observation_noise
self._mask = tensor_util.convert_nonref_to_tensor(
mask, dtype_hint=tf.bool, name='mask')
self._experimental_parallelize = experimental_parallelize
# TODO(b/78475680): Friendly dtype inference.
dtype = initial_state_prior.dtype
# Internally, the transition and observation matrices are
# canonicalized as callables returning a LinearOperator. This
# creates no overhead when the model is actually fixed, since in
# that case we simply build the trivial callable that returns
# the same matrix at each timestep.
def _maybe_make_linop(x, is_square=None, name=None):
"""Converts Tensors into LinearOperators."""
if not hasattr(x, 'to_dense'):
x = tfl.LinearOperatorFullMatrix(
tensor_util.convert_nonref_to_tensor(x, dtype=dtype),
is_square=is_square,
name=name)
return x
def _maybe_make_callable_from_linop(f, name, make_square_linop=None):
"""Converts fixed objects into trivial callables."""
if not callable(f):
linop = _maybe_make_linop(f, is_square=make_square_linop, name=name)
f = lambda t: linop
return f
self.get_transition_matrix_for_timestep = (
_maybe_make_callable_from_linop(
transition_matrix,
name='transition_matrix',
make_square_linop=True))
self.get_observation_matrix_for_timestep = (
_maybe_make_callable_from_linop(
observation_matrix, name='observation_matrix'))
# Similarly, we canonicalize the transition and observation
# noise models as callables returning a
# tfd.MultivariateNormalLinearOperator distribution object.
def _maybe_make_callable(f):
if not callable(f):
return lambda t: f
return f
self.get_transition_noise_for_timestep = _maybe_make_callable(
transition_noise)
self.get_observation_noise_for_timestep = _maybe_make_callable(
observation_noise)
latent_size = tf.compat.dimension_value(
initial_state_prior.event_shape[-1])
# We call the get_observation_matrix_for_timestep once so that
# we can infer the observation size. This potentially adds ops
# to the graph, though will not in typical cases (e.g., where
# the callable was generated by wrapping a fixed value using
# _maybe_make_callable above).
initial_observation_linop = self.get_observation_matrix_for_timestep(
self._initial_step)
observation_size = tf.compat.dimension_value(
initial_observation_linop.shape[-2])
self._latent_size = latent_size
self._observation_size = observation_size
super(LinearGaussianStateSpaceModel, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@property
def mask(self):
return self._mask
@property
def num_timesteps(self):
return self._num_timesteps
@property
def transition_matrix(self):
return self._transition_matrix
@property
def transition_noise(self):
return self._transition_noise
@property
def observation_matrix(self):
return self._observation_matrix
@property
def observation_noise(self):
return self._observation_noise
@property
def initial_state_prior(self):
return self._initial_state_prior
@property
def experimental_parallelize(self):
return self._experimental_parallelize
@property
def initial_step(self):
return self._initial_step
def _final_step(self):
with self._name_and_control_scope('final_step'):
return self.initial_step + self._num_timesteps
def latent_size_tensor(self):
with self._name_and_control_scope('latent_size_tensor'):
return self._latent_size_tensor_no_checks()
def _latent_size_tensor_no_checks(self):
if self._latent_size is None:
return distribution_util.prefer_static_value(
self.initial_state_prior.event_shape_tensor())[-1]
else:
return self._latent_size
def observation_size_tensor(self):
with self._name_and_control_scope('observation_size_tensor'):
return self._observation_size_tensor_no_checks()
def _observation_size_tensor_no_checks(self):
initial_observation_linop = self.get_observation_matrix_for_timestep(
self.initial_step)
return distribution_util.prefer_static_value(
initial_observation_linop.shape_tensor())[-2]
def backward_smoothing_pass(self,
filtered_means,
filtered_covs,
predicted_means,
predicted_covs):
"""Run the backward pass in Kalman smoother.
The backward smoothing uses the Rauch-Tung-Striebel smoother, as
discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are the values
returned by the `forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z[t] | x[:t]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z[t] | x[:t]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size,
latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z[t+1] | x[:t]), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z[t+1] | x[:t]), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size, latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z[t] | x[1:T]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z[t] | x[1:T]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size,
latent_size]`, which is of the same shape as filtered_covs.
"""
if self.experimental_parallelize:
warnings.warn('Backwards pass parallelization is not yet implemented; '
'using sequential implementation.')
with self._name_and_control_scope('backward_pass'):
filtered_means = tf.convert_to_tensor(
filtered_means, name='filtered_means')
filtered_covs = tf.convert_to_tensor(filtered_covs, name='filtered_covs')
predicted_means = tf.convert_to_tensor(
predicted_means, name='predicted_means')
predicted_covs = tf.convert_to_tensor(
predicted_covs, name='predicted_covs')
# To scan over time dimension, we need to move 'num_timesteps' from the
# event shape to the initial dimension of the tensor.
filtered_means = distribution_util.move_dimension(filtered_means, -2, 0)
filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0)
predicted_means = distribution_util.move_dimension(predicted_means, -2, 0)
predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0)
# The means are assumed to be vectors. Add a dummy index so that
# the `matmul` op works smoothly.
filtered_means = filtered_means[..., tf.newaxis]
predicted_means = predicted_means[..., tf.newaxis]
initial_backward_mean = predicted_means[-1, ...]
initial_backward_cov = predicted_covs[-1, ...]
num_timesteps = tf.shape(filtered_means)[0]
initial_state = BackwardPassState(
backward_mean=initial_backward_mean,
backward_cov=initial_backward_cov,
timestep=self.initial_step + num_timesteps - 1)
update_step_fn = build_backward_pass_step(
self.get_transition_matrix_for_timestep)
# For backward pass, it scans the `elems` from last to first.
posterior_states = tf.scan(update_step_fn,
elems=(filtered_means,
filtered_covs,
predicted_means,
predicted_covs),
initializer=initial_state,
reverse=True)
# Move the time dimension back into the event shape.
posterior_means = distribution_util.move_dimension(
posterior_states.backward_mean[..., 0], 0, -2)
posterior_covs = distribution_util.move_dimension(
posterior_states.backward_cov, 0, -3)
return (posterior_means, posterior_covs)
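# For reference, the Rauch-Tung-Striebel recursion computed by the scan
# above is (a sketch; J is the smoother gain, F the transition matrix):
#   J[t]    = filtered_cov[t] @ F[t].T @ inv(predicted_cov[t])
#   mean[t] = filtered_mean[t] + J[t] @ (smoothed_mean[t+1] - predicted_mean[t])
#   cov[t]  = filtered_cov[t] + J[t] @ (smoothed_cov[t+1] - predicted_cov[t]) @ J[t].T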
def _batch_shape_tensor(self):
# We assume the batch shapes of parameters don't change over time,
# so use the initial step as a prototype.
return functools.reduce(
ps.broadcast_shape,
[
self.initial_state_prior.batch_shape_tensor(),
self.get_transition_matrix_for_timestep(
self.initial_step).batch_shape_tensor(),
self.get_transition_noise_for_timestep(
self.initial_step).batch_shape_tensor(),
self.get_observation_matrix_for_timestep(
self.initial_step).batch_shape_tensor(),
self.get_observation_noise_for_timestep(
self.initial_step).batch_shape_tensor(),
])
def _batch_shape(self):
# We assume the batch shapes of parameters don't change over time,
# so use the initial step as a prototype.
return functools.reduce(
tf.broadcast_static_shape,
[
self.initial_state_prior.batch_shape,
self.get_transition_matrix_for_timestep(
self.initial_step).batch_shape,
self.get_transition_noise_for_timestep(
self.initial_step).batch_shape,
self.get_observation_matrix_for_timestep(
self.initial_step).batch_shape,
self.get_observation_noise_for_timestep(
self.initial_step).batch_shape,
])
def _event_shape(self):
return tf.TensorShape([
tf.get_static_value(self._num_timesteps),
self._observation_size
])
def _event_shape_tensor(self):
return tf.stack(
[self._num_timesteps,
self._observation_size_tensor_no_checks()])
def _get_mask(self, mask):
"""Falls back to `self.mask` if the passed-in mask is None."""
mask = self.mask if mask is None else mask
if mask is not None:
return tf.convert_to_tensor(mask, dtype_hint=tf.bool, name='mask')
return mask
def _get_time_varying_kwargs(self, idx):
"""Extracts model parameters at the given timestep."""
t = idx + self.initial_step
transition_noise = self.get_transition_noise_for_timestep(t)
observation_noise = self.get_observation_noise_for_timestep(t)
return tf.nest.map_structure(
tensor_util.identity_as_tensor,
{'transition_matrix': (
self.get_transition_matrix_for_timestep(t).to_dense()),
'observation_matrix': (
self.get_observation_matrix_for_timestep(t).to_dense()),
'transition_mean': transition_noise.mean(),
'transition_scale_tril': transition_noise.scale.to_dense(),
'observation_mean': observation_noise.mean(),
'observation_scale_tril': observation_noise.scale.to_dense()
})
def _build_model_spec_kwargs_for_parallel_fns(self,
sample_shape=(),
pass_covariance=False):
"""Builds a dict of model parameters across all timesteps."""
kwargs = parallel_for.pfor(self._get_time_varying_kwargs,
self.num_timesteps)
# If given a sample shape, encode it as additional batch dimension(s).
# It is sufficient to do this for one parameter (we use initial_mean),
# since the shape will broadcast to other parameters.
initial_mean = self.initial_state_prior.mean()
initial_mean = ps.broadcast_to(initial_mean,
ps.concat(
[sample_shape,
self.batch_shape_tensor(),
ps.shape(initial_mean)[-1:]],
axis=0))
kwargs['initial_mean'] = initial_mean
kwargs['initial_scale_tril'] = self.initial_state_prior.scale.to_dense()
if pass_covariance: # Build covariance matrices from scale factors.
for tril_key in [k for k in kwargs.keys() if 'scale_tril' in k]:
tril = kwargs.pop(tril_key)
kwargs[tril_key[:-10] + 'cov'] = tf.matmul(tril, tril, transpose_b=True)
return kwargs
def _sample_n(self, n, seed=None):
_, observation_samples = self._joint_sample_n(n, seed=seed)
return observation_samples
def _joint_sample_n(self, n, seed=None):
if self.experimental_parallelize:
x, y = parallel_filter.sample_walk(
seed=seed,
**self._build_model_spec_kwargs_for_parallel_fns(sample_shape=[n]))
return (distribution_util.move_dimension(x, 0, -2),
distribution_util.move_dimension(y, 0, -2))
return self._joint_sample_n_sequential(n, seed=seed)
def _joint_sample_n_sequential(self, n, seed=None):
"""Draw a joint sample from the prior over latents and observations."""
with self._name_and_control_scope('sample_n_joint'):
initial_state_seed, initial_obs_seed, loop_seed = samplers.split_seed(
seed, n=3, salt='LinearGaussianStateSpaceModel_sample_n_joint')
batch_shape = self.batch_shape
if not tensorshape_util.is_fully_defined(batch_shape):
batch_shape = self.batch_shape_tensor()
sample_and_batch_shape = ps.concat([[n], batch_shape], axis=0)
# Sample the initial timestep from the prior. Since we want
# this sample to have full batch shape (not just the batch shape
# of the self.initial_state_prior object which might in general be
# smaller), we augment the sample shape to include whatever
# extra batch dimensions are required.
initial_latent = self.initial_state_prior.sample(
sample_shape=_augment_sample_shape(
self.initial_state_prior,
sample_and_batch_shape,
self.validate_args),
seed=initial_state_seed)
# Add a dummy dimension so that matmul() does matrix-vector
# multiplication.
initial_latent = initial_latent[..., tf.newaxis]
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_pred = initial_observation_matrix.matmul(
initial_latent)
initial_observation = (initial_observation_pred +
initial_observation_noise.sample(
sample_shape=_augment_sample_shape(
initial_observation_noise,
sample_and_batch_shape,
self.validate_args),
seed=initial_obs_seed)[..., tf.newaxis])
sample_step = build_kalman_sample_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep,
full_sample_and_batch_shape=sample_and_batch_shape,
validate_args=self.validate_args)
# Scan over all timesteps to sample latents and observations.
(latents, observations, _) = tf.scan(
sample_step,
elems=ps.range(self.initial_step + 1, self._final_step()),
initializer=(initial_latent, initial_observation, loop_seed))
# Combine the initial sampled timestep with the remaining timesteps.
latents = _safe_concat([initial_latent[tf.newaxis, ...],
latents])
observations = _safe_concat([initial_observation[tf.newaxis, ...],
observations])
# Put dimensions back in order. The samples we've computed are
# ordered by timestep, with shape `[num_timesteps, num_samples,
# batch_shape, size, 1]` where `size` represents `latent_size`
# or `observation_size` respectively. But timesteps are really
# part of each probabilistic event, so we need to return a Tensor
# of shape `[num_samples, batch_shape, num_timesteps, size]`.
latents = tf.squeeze(latents, -1)
latents = distribution_util.move_dimension(latents, 0, -2)
observations = tf.squeeze(observations, -1)
observations = distribution_util.move_dimension(observations, 0, -2)
return latents, observations
# Stub reimplementation of _prob so we can modify the docstring to include
# the mask.
@distribution_util.AppendDocstring(kwargs_dict={
'mask':
'optional bool-type `Tensor` with rightmost dimension '
'`[num_timesteps]`; `True` values specify that the value of `x` '
'at that timestep is masked, i.e., not conditioned on. Additional '
'dimensions must match or be broadcastable to `self.batch_shape`; any '
'further dimensions must match or be broadcastable to the sample '
'shape of `x`. Default value: `None` (falls back to `self.mask`).'})
def _prob(self, x, mask=None):
return tf.exp(self._log_prob(x, mask=mask))
# Stub reimplementation of _log_prob so we can modify the docstring to include
# the mask.
@distribution_util.AppendDocstring(kwargs_dict={
'mask':
'optional bool-type `Tensor` with rightmost dimension '
'`[num_timesteps]`; `True` values specify that the value of `x` '
'at that timestep is masked, i.e., not conditioned on. Additional '
'dimensions must match or be broadcastable to `self.batch_shape`; any '
'further dimensions must match or be broadcastable to the sample '
'shape of `x`. Default value: `None` (falls back to `self.mask`).'})
def _log_prob(self, x, mask=None):
log_likelihood, _, _, _, _, _, _ = self._forward_filter(
x, mask=mask, final_step_only=True)
return log_likelihood
def forward_filter(self, x, mask=None, final_step_only=False):
"""Run a Kalman filter over a provided sequence of outputs.
Note that the returned values `filtered_means`, `predicted_means`, and
`observation_means` depend on the observed time series `x`, while the
corresponding covariances are independent of the observed series; i.e., they
depend only on the model itself. This means that the mean values have shape
`concat([sample_shape(x), batch_shape, [num_timesteps,
{latent/observation}_size]])`, while the covariances have shape
`concat([batch_shape, [num_timesteps, {latent/observation}_size,
{latent/observation}_size]])`, which does not depend on the sample shape.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None` (falls back to `self.mask`).
final_step_only: optional Python `bool`. If `True`, the `num_timesteps`
dimension is omitted from all return values and only the value from the
final timestep is returned (in this case, `log_likelihoods` will
be the *cumulative* log marginal likelihood). This may be significantly
more efficient than returning all values (although note that no
efficiency gain is expected when `self.experimental_parallelize=True`).
Default value: `False`.
Returns:
log_likelihoods: Per-timestep log marginal likelihoods `log
p(x[t] | x[:t-1])` evaluated at the input `x`, as a `Tensor`
of shape `sample_shape(x) + batch_shape + [num_timesteps].`
If `final_step_only` is `True`, this will instead be the
*cumulative* log marginal likelihood at the final step.
filtered_means: Means of the per-timestep filtered marginal
distributions p(z[t] | x[:t]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z[t] | x[:t]), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size,
latent_size]`. Since posterior covariances do not depend on observed
data, some implementations may return a Tensor whose shape omits the
initial `sample_shape(x)`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z[t+1] | x[:t]), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z[t+1] | x[:t]), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size, latent_size]`. Since posterior covariances
do not depend on observed data, some implementations may return a
Tensor whose shape omits the initial `sample_shape(x)`.
observation_means: Means of the per-timestep predictive
distributions over observations, p(x[t] | x[:t-1]), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, observation_size]`.
observation_covs: Covariances of the per-timestep predictive
distributions over observations, p(x[t] | x[:t-1]), as a
Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps,
observation_size, observation_size]`. Since posterior covariances
do not depend on observed data, some implementations may return a
Tensor whose shape omits the initial `sample_shape(x)`.
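
    For example (a sketch: the model construction below is illustrative and
    assumes `tfd = tfp.distributions`):

    ```
    ndims = 2
    model = tfd.LinearGaussianStateSpaceModel(
        num_timesteps=100,
        transition_matrix=tf.linalg.LinearOperatorIdentity(ndims),
        transition_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.ones([ndims])),
        observation_matrix=tf.linalg.LinearOperatorIdentity(ndims),
        observation_noise=tfd.MultivariateNormalDiag(
            scale_diag=5. * tf.ones([ndims])),
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=tf.ones([ndims])))
    x = model.sample(5)  # A sample of five independent series.
    (log_likelihoods,
     filtered_means, filtered_covs,
     predicted_means, predicted_covs,
     observation_means, observation_covs) = model.forward_filter(x)
    ```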
"""
x = tf.convert_to_tensor(x, name='x')
mask = self._get_mask(mask)
with self._name_and_control_scope('forward_filter', x, {'mask': mask}):
return FilterResults(
*self._forward_filter(x, mask=mask, final_step_only=final_step_only))
def _forward_filter(self, x, mask=None, final_step_only=False):
mask = self._get_mask(mask)
if self.experimental_parallelize:
filter_results = parallel_filter.kalman_filter(
y=distribution_util.move_dimension(x, -2, 0),
mask=(None if mask is None
else distribution_util.move_dimension(mask, -1, 0)),
**self._build_model_spec_kwargs_for_parallel_fns(
pass_covariance=True))
if final_step_only:
# Not clear if/how we can efficiently get *just* the final step from
# parallel filtering, so just do the naive thing for now.
return tf.nest.map_structure(
lambda x: x[-1],
filter_results._replace(
log_likelihoods=tf.cumsum(filter_results.log_likelihoods,
axis=0)))
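      # Move the time axis from the front back into the event shape of each
      # result part: to position -1 for per-timestep scalars (log
      # likelihoods), -2 for mean vectors, and -3 for covariance matrices.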
return tf.nest.map_structure(
lambda x, r: distribution_util.move_dimension(x, 0, -r),
filter_results,
type(filter_results)(1, 2, 3, 2, 3, 2, 3))
return self._forward_filter_sequential(
x, mask=mask, final_step_only=final_step_only)
def _forward_filter_sequential(self, x, mask=None, final_step_only=False):
with tf.name_scope('forward_filter_sequential'):
mask = self._get_mask(mask)
# Get the full output sample_shape + batch shape. Usually
# this will just be x[:-2], i.e. the input shape excluding
# event shape. But users can specify inputs that broadcast
# batch dimensions, so we need to broadcast this against
# self.batch_shape.
batch_shape = self.batch_shape
if not tensorshape_util.is_fully_defined(batch_shape):
batch_shape = self.batch_shape_tensor()
sample_and_batch_shape = functools.reduce(
ps.broadcast_shape, [
ps.shape(x)[:-2],
ps.shape(mask)[:-1] if mask is not None else [],
batch_shape
])
# Get the full output shape for covariances. The posterior variances
# in a LGSSM depend only on the model params (batch shape) and on the
# missingness pattern (mask shape), so in general this may be smaller
# than the full `sample_and_batch_shape`.
mask_sample_and_batch_shape = ps.broadcast_shape(
ps.shape(mask)[:-1] if mask is not None else [],
batch_shape)
      # To scan over timesteps we need to move `num_timesteps` from the
# event shape to the initial dimension of the tensor.
x = distribution_util.move_dimension(x, -2, 0)
if mask is not None:
mask = distribution_util.move_dimension(mask, -1, 0)
# Observations are assumed to be vectors, but we add a dummy
# extra dimension to allow us to use `matmul` throughout.
x = x[..., tf.newaxis]
if mask is not None:
# Align mask.shape with x.shape, including a unit dimension to broadcast
# against `observation_size`.
mask = mask[..., tf.newaxis, tf.newaxis]
# Initialize filtering distribution from the prior. The mean in
# a Kalman filter depends on data, so should match the full
# sample and batch shape. The covariance is data-independent, so
# only has batch shape.
latent_size = self.latent_size_tensor()
prior_mean = tf.broadcast_to(
self.initial_state_prior.mean()[..., tf.newaxis],
ps.concat([sample_and_batch_shape,
[latent_size, 1]], axis=0))
prior_cov = tf.broadcast_to(
self.initial_state_prior.covariance(),
ps.concat([mask_sample_and_batch_shape,
[latent_size, latent_size]], axis=0))
initial_observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
initial_observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
initial_observation_mean = _propagate_mean(prior_mean,
initial_observation_matrix,
initial_observation_noise)
initial_observation_cov = _propagate_cov(prior_cov,
initial_observation_matrix,
initial_observation_noise)
initial_state = KalmanFilterState(
predicted_mean=prior_mean,
predicted_cov=prior_cov,
filtered_mean=prior_mean, # establishes shape, value ignored
filtered_cov=prior_cov, # establishes shape, value ignored
observation_mean=initial_observation_mean,
observation_cov=initial_observation_cov,
log_marginal_likelihood=tf.zeros(
shape=sample_and_batch_shape, dtype=self.dtype),
timestep=tf.convert_to_tensor(
self.initial_step, dtype=tf.int32, name='initial_step'))
update_step_fn = build_kalman_filter_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
if final_step_only:
# If we don't need intermediate states, then we can use a `while_loop`
# in place of `scan`.
filter_states = tf.while_loop(
cond=lambda *_: True,
body=_build_accumulating_loop_body(
update_step_fn, x=x, mask=mask,
initial_step=initial_state.timestep),
loop_vars=initial_state,
maximum_iterations=ps.size0(x))
else:
filter_states = tf.nest.map_structure(
# Move the time dimension back into the event shape(s).
lambda x, d: distribution_util.move_dimension(x, 0, -(d + 1)),
tf.scan(update_step_fn,
elems=x if mask is None else (x, mask),
initializer=initial_state),
KalmanFilterState(
# Event ranks of each filter state part. Note that means are
# still [D, 1] matrices here (the dummy dimension is stripped
# below).
predicted_mean=2, predicted_cov=2,
filtered_mean=2, filtered_cov=2,
observation_mean=2, observation_cov=2,
log_marginal_likelihood=0, timestep=0))
# We could directly construct the batch Distributions
# filtered_marginals = tfd.MultivariateNormalFullCovariance(
# filtered_means, filtered_covs)
# predicted_marginals = tfd.MultivariateNormalFullCovariance(
# predicted_means, predicted_covs)
# but we choose not to: returning the raw means and covariances
# saves computation in Eager mode (avoiding an immediate
# Cholesky factorization that the user may not want) and aids
# debugging of numerical issues.
return (
filter_states.log_marginal_likelihood,
filter_states.filtered_mean[..., 0], filter_states.filtered_cov,
filter_states.predicted_mean[..., 0], filter_states.predicted_cov,
filter_states.observation_mean[..., 0], filter_states.observation_cov)
def posterior_marginals(self, x, mask=None):
"""Run a Kalman smoother to return posterior mean and cov.
    Note that the returned `smoothed_means` depend on the observed
    time series `x`, while the `smoothed_covs` are independent
of the observed series; i.e., they depend only on the model itself.
This means that the mean values have shape `concat([sample_shape(x),
batch_shape, [num_timesteps, {latent/observation}_size]])`,
    while the covariances have shape `concat([batch_shape, [num_timesteps,
{latent/observation}_size, {latent/observation}_size]])`, which
does not depend on the sample shape.
    This function only performs smoothing. If the user also wants the
    intermediate values returned by the filtering pass `forward_filter`,
    they can be obtained as follows:
```
(log_likelihoods,
filtered_means, filtered_covs,
predicted_means, predicted_covs,
observation_means, observation_covs) = model.forward_filter(x)
smoothed_means, smoothed_covs = model.backward_smoothing_pass(
filtered_means, filtered_covs,
predicted_means, predicted_covs)
```
where `x` is an observation sequence.
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable to `self.batch_shape`; any further dimensions
are interpreted as a sample shape.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable to `self.batch_shape`; any
further dimensions must match or be broadcastable to the sample
shape of `x`.
Default value: `None` (falls back to `self.mask`).
Returns:
smoothed_means: Means of the per-timestep smoothed
distributions over latent states, p(z[t] | x[:T]), as a
Tensor of shape `sample_shape(x) + batch_shape +
        [num_timesteps, latent_size]`.
smoothed_covs: Covariances of the per-timestep smoothed
distributions over latent states, p(z[t] | x[:T]), as a
        Tensor of shape `sample_shape(mask) + batch_shape + [num_timesteps,
        latent_size, latent_size]`. Note that the covariances depend
only on the model and the mask, not on the data, so this may have fewer
dimensions than `filtered_means`.
"""
x = tf.convert_to_tensor(x, name='x')
mask = self._get_mask(mask)
with self._name_and_control_scope('smooth', x, {'mask': mask}):
(_, filtered_means, filtered_covs,
predicted_means, predicted_covs, _, _) = self._forward_filter(
x, mask=mask)
(smoothed_means, smoothed_covs) = self.backward_smoothing_pass(
filtered_means, filtered_covs,
predicted_means, predicted_covs)
return (smoothed_means, smoothed_covs)
def posterior_sample(self, x, sample_shape=(), mask=None, seed=None,
name=None):
"""Draws samples from the posterior over latent trajectories.
This method uses Durbin-Koopman sampling [1], an efficient algorithm to
sample from the posterior latents of a linear Gaussian state space model.
The cost of drawing a sample is equal to the cost of drawing a prior
    sample (`.sample(sample_shape)`), plus the cost of Kalman smoothing
    (`.posterior_marginals(...)`) on both the observed time series and the
    prior sample. This method is significantly more efficient in graph mode,
    because it uses only the posterior means and can elide the unneeded
    calculation of marginal covariances.
[1] Durbin, J. and Koopman, S.J. A simple and efficient simulation
smoother for state space time series analysis. _Biometrika_
89(3):603-615, 2002.
https://www.jstor.org/stable/4140605
Args:
x: a float-type `Tensor` with rightmost dimensions
`[num_timesteps, observation_size]` matching
`self.event_shape`. Additional dimensions must match or be
broadcastable with `self.batch_shape`.
sample_shape: `int` `Tensor` shape of samples to draw.
Default value: `()`.
mask: optional bool-type `Tensor` with rightmost dimension
`[num_timesteps]`; `True` values specify that the value of `x`
at that timestep is masked, i.e., not conditioned on. Additional
dimensions must match or be broadcastable with `self.batch_shape` and
`x.shape[:-2]`.
Default value: `None` (falls back to `self.mask`).
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name for ops generated by this method.
Returns:
latent_posterior_sample: Float `Tensor` of shape
`concat([sample_shape, batch_shape, [num_timesteps, latent_size]])`,
where `batch_shape` is the broadcast shape of `self.batch_shape`,
        `x.shape[:-2]`, and `mask.shape[:-1]`, representing samples from
        the posterior over latent states given the observed value `x`.
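
    For example (a sketch assuming a `model` of this class and an observed
    series `x` compatible with `model.event_shape`; names are illustrative):

    ```
    posterior_draws = model.posterior_sample(x, sample_shape=[20], seed=42)
    # ==> shape [20] + batch_shape + [num_timesteps, latent_size]
    ```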
"""
x = tf.convert_to_tensor(x, name='x')
sample_shape = ps.convert_to_shape_tensor(sample_shape, dtype_hint=tf.int32)
mask = self._get_mask(mask)
with self._name_and_control_scope(
name or 'posterior_sample', x, {'mask': mask}):
# Get static batch shape if possible.
if self.batch_shape.is_fully_defined():
batch_shape = tensorshape_util.as_list(self.batch_shape)
else:
batch_shape = self.batch_shape_tensor()
# Draw one prior sample per result. If `x` has larger batch shape
# than the distribution, we'll need to draw extra samples to match.
result_sample_and_batch_shape = ps.concat([
distribution_util.expand_to_vector(sample_shape),
ps.convert_to_shape_tensor(
functools.reduce(ps.broadcast_shape, [
ps.shape(x)[:-2],
ps.shape(mask)[:-1] if mask is not None else [],
batch_shape]),
dtype_hint=tf.int32)
], axis=0)
sample_size = ps.cast(
ps.reduce_prod(result_sample_and_batch_shape) /
ps.reduce_prod(batch_shape), tf.int32)
prior_latent_sample, prior_obs_sample = self._joint_sample_n(
sample_size, seed=seed)
latent_size = self.latent_size_tensor()
observation_size = ps.shape(prior_obs_sample)[-1]
result_shape = ps.concat(
[result_sample_and_batch_shape,
[self.num_timesteps, latent_size]], axis=0)
broadcast_observed_shape = ps.concat(
[result_sample_and_batch_shape,
[self.num_timesteps, observation_size]], axis=0)
prior_latent_sample = tf.reshape(prior_latent_sample, result_shape)
prior_obs_sample = tf.reshape(prior_obs_sample, broadcast_observed_shape)
# Compute latent posterior means from the sampled and real observations.
batch_mean, _ = self.posterior_marginals(
tf.stack([prior_obs_sample,
tf.broadcast_to(x, broadcast_observed_shape)],
axis=0), mask=mask)
prior_latent_mean, posterior_latent_mean = tf.unstack(batch_mean, axis=0)
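      # Durbin-Koopman identity: a posterior draw is the prior latent draw
      # shifted by the difference between the posterior means computed under
      # the observed and the simulated series.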
return posterior_latent_mean + prior_latent_sample - prior_latent_mean
def _mean(self):
_, observation_mean = self._joint_mean()
return observation_mean
def _joint_mean(self):
"""Compute prior means for all variables via dynamic programming.
Returns:
latent_means: Prior means of latent states `z[t]`, as a `Tensor`
        of shape `batch_shape + [num_timesteps, latent_size]`.
      observation_means: Prior means of observations
        `x[t]`, as a `Tensor` of shape `batch_shape + [num_timesteps,
        observation_size]`.
"""
if self.experimental_parallelize:
warnings.warn('Parallelization of prior mean is not yet implemented; '
'using sequential implementation.')
with self._name_and_control_scope('mean_joint'):
# The initial timestep is a special case, since we sample the
# latent state from the prior rather than the transition model.
# Broadcast to ensure we represent the full batch shape.
initial_latent_mean = tf.broadcast_to(
self.initial_state_prior.mean()[..., tf.newaxis],
ps.concat([self.batch_shape_tensor(),
[self.latent_size_tensor(), 1]], axis=0))
initial_observation_mean = _propagate_mean(
initial_latent_mean,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
mean_step = build_kalman_mean_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_means, observation_means) = tf.scan(
mean_step,
elems=tf.range(self.initial_step + 1, self._final_step()),
initializer=(initial_latent_mean, initial_observation_mean))
# Squish the initial step back on top of the other (scanned) timesteps
latent_means = _safe_concat([initial_latent_mean[tf.newaxis, ...],
latent_means])
observation_means = _safe_concat([
initial_observation_mean[tf.newaxis, ...],
observation_means])
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, 1]`, where `size`
# is the dimension of the latent or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size]`.
latent_means = tf.squeeze(latent_means, -1)
latent_means = distribution_util.move_dimension(latent_means, 0, -2)
observation_means = tf.squeeze(observation_means, -1)
observation_means = distribution_util.move_dimension(
observation_means, 0, -2)
return latent_means, observation_means
def _joint_covariances(self):
"""Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z[t]`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x[t]`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
"""
if self.experimental_parallelize:
warnings.warn('Parallelization of prior covariance is not yet '
'implemented; using sequential implementation.')
with self._name_and_control_scope('covariance_joint'):
latent_size = self.latent_size_tensor()
initial_latent_cov = tf.broadcast_to(
self.initial_state_prior.covariance(),
ps.concat([self.batch_shape_tensor(),
[latent_size, latent_size]], axis=0))
initial_observation_cov = _propagate_cov(
initial_latent_cov,
self.get_observation_matrix_for_timestep(self.initial_step),
self.get_observation_noise_for_timestep(self.initial_step))
cov_step = build_kalman_cov_step(
self.get_transition_matrix_for_timestep,
self.get_transition_noise_for_timestep,
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_covs, observation_covs) = tf.scan(
cov_step,
elems=tf.range(self.initial_step+1, self._final_step()),
initializer=(initial_latent_cov, initial_observation_cov))
# Squish the initial step back on top of the other (scanned) timesteps
latent_covs = _safe_concat([initial_latent_cov[tf.newaxis, ...],
latent_covs])
observation_covs = _safe_concat([initial_observation_cov[tf.newaxis, ...],
observation_covs])
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, size]`, where `size`
# is the dimension of the state or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size, size]`.
latent_covs = distribution_util.move_dimension(latent_covs, 0, -3)
observation_covs = distribution_util.move_dimension(
observation_covs, 0, -3)
return latent_covs, observation_covs
def _variance(self):
_, observation_covs = self._joint_covariances()
return tf.linalg.diag_part(observation_covs)
def latents_to_observations(self, latent_means, latent_covs):
"""Push latent means and covariances forward through the observation model.
Args:
latent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`
latent_covs: float `Tensor` of shape
`[..., num_timesteps, latent_size, latent_size]`.
Returns:
observation_means: float `Tensor` of shape
`[..., num_timesteps, observation_size]`
observation_covs: float `Tensor` of shape
`[..., num_timesteps, observation_size, observation_size]`
"""
with self._name_and_control_scope('latents_to_observations'):
pushforward_latents_step = build_pushforward_latents_step(
self.get_observation_matrix_for_timestep,
self.get_observation_noise_for_timestep)
latent_means = distribution_util.move_dimension(
latent_means, source_idx=-2, dest_idx=0)
latent_means = latent_means[..., tf.newaxis] # Make matmul happy.
latent_covs = distribution_util.move_dimension(
latent_covs, source_idx=-3, dest_idx=0)
def pfor_body(t):
return pushforward_latents_step(
t=self.initial_step + t,
latent_mean=tf.gather(latent_means, t),
latent_cov=tf.gather(latent_covs, t))
observation_means, observation_covs = parallel_for.pfor(
pfor_body, self._num_timesteps)
observation_means = distribution_util.move_dimension(
observation_means[..., 0], source_idx=0, dest_idx=-2)
observation_covs = distribution_util.move_dimension(
observation_covs, source_idx=0, dest_idx=-3)
return observation_means, observation_covs
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _sample_control_dependencies(self, x, mask=None):
# Check event shape statically if possible
assertions = []
assertions.append(
_check_equal_shape(
'x',
x.shape[-2:],
tf.shape(x)[-2:],
self.event_shape,
self.event_shape_tensor(),
validate_args=self.validate_args))
mask = self._get_mask(mask)
if mask is not None:
if (tensorshape_util.rank(mask.shape) is None or
tensorshape_util.rank(x.shape) is None):
if self.validate_args:
assertions.append(assert_util.assert_greater_equal(
tf.rank(x),
tf.rank(mask),
message=('mask cannot have higher rank than x!')))
elif tensorshape_util.rank(mask.shape) > tensorshape_util.rank(x.shape):
raise ValueError(
'mask cannot have higher rank than x! ({} vs {})'.format(
tensorshape_util.rank(mask.shape),
tensorshape_util.rank(x.shape)))
assertions.append(_check_equal_shape(
'mask', mask.shape[-1:],
tf.shape(mask)[-1:], self.event_shape[-2:-1],
self.event_shape_tensor()[-2:-1], validate_args=self.validate_args))
return [op for op in assertions if op is not None]
def _parameter_control_dependencies(self, is_init):
# Normally we'd have a path where we'd do the shape checks statically
# regardless of the validate_args setting, but here we don't do that because
# constructing these matrices might be expensive.
if not self.validate_args:
return []
transition_matrix = (
self.get_transition_matrix_for_timestep(self.initial_step))
transition_noise = (
self.get_transition_noise_for_timestep(self.initial_step))
observation_matrix = (
self.get_observation_matrix_for_timestep(self.initial_step))
observation_noise = (
self.get_observation_noise_for_timestep(self.initial_step))
dtype_util.assert_same_float_dtype([
self.initial_state_prior, transition_matrix, transition_noise,
observation_matrix, observation_noise
])
latent_size = self._latent_size_tensor_no_checks()
observation_size = self._observation_size_tensor_no_checks()
latent_size_ = tf.get_static_value(latent_size)
observation_size_ = tf.get_static_value(
observation_size)
assertions = [
_check_equal_shape(
name='transition_matrix',
static_shape=transition_matrix.shape[-2:],
dynamic_shape=transition_matrix.shape_tensor()[-2:],
static_target_shape=[latent_size_, latent_size_],
dynamic_target_shape=[latent_size, latent_size]),
_check_equal_shape(
name='observation_matrix',
static_shape=observation_matrix.shape[-2:],
dynamic_shape=observation_matrix.shape_tensor()[-2:],
static_target_shape=[observation_size_, latent_size_],
dynamic_target_shape=[observation_size, latent_size]),
_check_equal_shape(
name='initial_state_prior',
static_shape=self.initial_state_prior.event_shape,
dynamic_shape=self.initial_state_prior.event_shape_tensor(),
static_target_shape=[latent_size_],
dynamic_target_shape=[latent_size]),
_check_equal_shape(
name='transition_noise',
static_shape=transition_noise.event_shape,
dynamic_shape=transition_noise.event_shape_tensor(),
static_target_shape=[latent_size_],
dynamic_target_shape=[latent_size]),
_check_equal_shape(
name='observation_noise',
static_shape=observation_noise.event_shape,
dynamic_shape=observation_noise.event_shape_tensor(),
static_target_shape=[observation_size_],
dynamic_target_shape=[observation_size])]
return [op for op in assertions if op is not None]
KalmanFilterState = collections.namedtuple('KalmanFilterState', [
'filtered_mean', 'filtered_cov',
'predicted_mean', 'predicted_cov',
'observation_mean', 'observation_cov',
'log_marginal_likelihood', 'timestep'])
BackwardPassState = collections.namedtuple('BackwardPassState', [
'backward_mean', 'backward_cov', 'timestep'])
def build_backward_pass_step(get_transition_matrix_for_timestep):
"""Build a callable that perform one step for backward smoothing.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
Returns:
backward_pass_step: a callable that updates a BackwardPassState
from timestep `t` to `t-1`.
"""
def backward_pass_step(state,
filtered_parameters):
"""Run a single step of backward smoothing."""
(filtered_mean, filtered_cov,
predicted_mean, predicted_cov) = filtered_parameters
transition_matrix = get_transition_matrix_for_timestep(state.timestep)
next_posterior_mean = state.backward_mean
next_posterior_cov = state.backward_cov
posterior_mean, posterior_cov = backward_smoothing_update(
filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix)
return BackwardPassState(backward_mean=posterior_mean,
backward_cov=posterior_cov,
timestep=state.timestep - 1)
return backward_pass_step
def backward_smoothing_update(filtered_mean,
filtered_cov,
predicted_mean,
predicted_cov,
next_posterior_mean,
next_posterior_cov,
transition_matrix):
"""Backward update for a Kalman smoother.
  Given the `filtered_mean` mu(t | t) and `filtered_cov` sigma(t | t),
  the `predicted_mean` mu(t+1 | t) and `predicted_cov` sigma(t+1 | t)
  returned by the `forward_filter` function, and the smoothed
  `next_posterior_mean` mu(t+1 | 1:T) and `next_posterior_cov`
  sigma(t+1 | 1:T), with the `transition_matrix` of states from time t
  to time t+1 given as A(t+1), the one-step backward smoothed
  distribution parameters are calculated as:

    p(z(t) | Obs(1:T)) = N(mu(t | 1:T), sigma(t | 1:T)),
    mu(t | 1:T) = mu(t | t) + J(t) * (mu(t+1 | 1:T) - mu(t+1 | t)),
    sigma(t | 1:T) = sigma(t | t)
                     + J(t) * (sigma(t+1 | 1:T) - sigma(t+1 | t)) * J(t)',
    J(t) = sigma(t | t) * A(t+1)' / sigma(t+1 | t),

  where all multiplications are matrix multiplications and `/` denotes
  multiplication by a matrix inverse. J(t) is the backward Kalman gain
  matrix.

  The recursion is initialized with mu(T | 1:T) and sigma(T | 1:T),
  which are the final-step parameters returned by `forward_filter`.
Args:
filtered_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | t).
filtered_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | t).
predicted_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | t).
predicted_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t+1 | t).
next_posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t+1 | 1:T).
next_posterior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B`, containing sigma(t+1 | 1:T).
transition_matrix: `LinearOperator` with shape
`[latent_size, latent_size]` and batch shape broadcastable
to `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`, containing mu(t | 1:T).
posterior_cov: `Tensor` with event shape `[latent_size, latent_size]` and
batch shape `B`, containing sigma(t | 1:T).
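
  For example, a scalar-latent sketch with illustrative values:

  ```
  smoothed_mean, smoothed_cov = backward_smoothing_update(
      filtered_mean=tf.constant([[1.0]]),
      filtered_cov=tf.constant([[0.5]]),
      predicted_mean=tf.constant([[0.9]]),
      predicted_cov=tf.constant([[0.9]]),
      next_posterior_mean=tf.constant([[1.2]]),
      next_posterior_cov=tf.constant([[0.4]]),
      transition_matrix=tf.linalg.LinearOperatorFullMatrix([[0.9]]))
  # J = 0.5 * 0.9 / 0.9 = 0.5, so
  # smoothed_mean = 1.0 + 0.5 * (1.2 - 0.9) = [[1.15]],
  # smoothed_cov = 0.5 + 0.5 * (0.4 - 0.9) * 0.5 = [[0.375]].
  ```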
"""
latent_size_is_static_and_scalar = (filtered_cov.shape[-2] == 1)
  # Compute the backward Kalman gain:
  #   J = F * T' * P^{-1}
  # Since both F(iltered) and P(redictive) are covariance matrices,
  # hence self-adjoint, we can take the transpose and reuse our previous
  # computation:
  #   = (P^{-1} * T * F)'
  #   = (P^{-1} * tmp_gain_cov)'
  #   = (P \ tmp_gain_cov)'
tmp_gain_cov = transition_matrix.matmul(filtered_cov)
if latent_size_is_static_and_scalar:
gain_transpose = tmp_gain_cov / predicted_cov
else:
gain_transpose = tf.linalg.cholesky_solve(
tf.linalg.cholesky(predicted_cov), tmp_gain_cov)
posterior_mean = (filtered_mean +
tf.linalg.matmul(gain_transpose,
next_posterior_mean - predicted_mean,
adjoint_a=True))
posterior_cov = (
filtered_cov +
tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
next_posterior_cov - predicted_cov, gain_transpose),
adjoint_a=True))
return (posterior_mean, posterior_cov)
def build_kalman_filter_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman filtering.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_filter_step: a callable that updates a KalmanFilterState
from timestep `t-1` to `t`.
"""
def kalman_filter_step(state, elems_t):
"""Run a single step of Kalman filtering.
Args:
state: A `KalmanFilterState` object representing the previous
filter state at time `t-1`.
      elems_t: A tuple of Tensors `(x[t], mask_t)`, or a `Tensor` `x[t]`.
        `x[t]` is a `Tensor` with rightmost shape dimensions
        `[observation_size, 1]` representing the vector observed at time `t`,
        and `mask_t` is a `Tensor` with rightmost dimensions `[1, 1]`
        representing the observation mask at time `t`. Both `x[t]` and
        `mask_t` may have batch dimensions, which must be compatible with the
        batch dimensions of `state.predicted_mean` and `state.predicted_cov`
        respectively. If `elems_t` is a plain `Tensor`, the mask is taken to
        be `None`.
Returns:
new_state: A `KalmanFilterState` object representing the new
filter state at time `t`.
"""
if isinstance(elems_t, tuple):
x_t, mask_t = elems_t
else:
x_t = elems_t
mask_t = None
observation_matrix = get_observation_matrix_for_timestep(state.timestep)
observation_noise = get_observation_noise_for_timestep(state.timestep)
if mask_t is not None:
# Before running the update, fill in masked observations using the prior
# expectation. The precise filled value shouldn't matter since updates
# from masked elements will not be selected below, but we need to ensure
      # that any results we incidentally compute on masked values are at least
# finite (not inf or NaN) so that they don't screw up gradient propagation
# through `tf.where`, as described in
# https://github.com/tensorflow/tensorflow/issues/2540.
# We fill with the prior expectation because any fixed value such as zero
# might be arbitrarily unlikely under the prior, leading to overflow in
# the updates, but the prior expectation should always be a
# 'reasonable' observation.
x_expected = _propagate_mean(state.predicted_mean,
observation_matrix,
observation_noise) * tf.ones_like(x_t)
x_t = tf.where(mask_t, x_expected, x_t)
# Given predicted mean u_{t|t-1} and covariance P_{t|t-1} from the
# previous step, incorporate the observation x_t, producing the
# filtered mean u_t and covariance P_t.
(filtered_mean,
filtered_cov,
observation_dist) = linear_gaussian_update(
state.predicted_mean, state.predicted_cov,
observation_matrix, observation_noise,
x_t)
# Compute the marginal likelihood p(x[t] | x[:t-1]) for this
# observation.
log_marginal_likelihood = observation_dist.log_prob(x_t[..., 0])
if mask_t is not None:
filtered_mean = tf.where(mask_t, state.predicted_mean, filtered_mean)
filtered_cov = tf.where(mask_t, state.predicted_cov, filtered_cov)
log_marginal_likelihood = tf.where(
mask_t[..., 0, 0], tf.zeros_like(log_marginal_likelihood),
log_marginal_likelihood)
# Run the filtered posterior through the transition
# model to predict the next time step:
# u_{t|t-1} = F_t u_{t-1} + b_t
# P_{t|t-1} = F_t P_{t-1} F_t' + Q_t
predicted_mean, predicted_cov = kalman_transition(
filtered_mean,
filtered_cov,
get_transition_matrix_for_timestep(state.timestep),
get_transition_noise_for_timestep(state.timestep))
return KalmanFilterState(
filtered_mean, filtered_cov,
predicted_mean, predicted_cov,
observation_dist.mean()[..., tf.newaxis],
_get_covariance_no_broadcast(observation_dist),
log_marginal_likelihood,
state.timestep+1)
return kalman_filter_step
def _build_accumulating_loop_body(kalman_filter_step_fn, x, mask, initial_step):
"""Wraps a Kalman filter step to accumulate the marginal likelihood."""
def accumulating_loop_body(*kalman_filter_state_parts):
previous_filter_state = KalmanFilterState(*kalman_filter_state_parts)
new_filter_state = kalman_filter_step_fn(
state=previous_filter_state,
elems_t=tf.nest.map_structure( # Get observations for this timestep.
lambda v: tf.gather( # pylint: disable=g-long-lambda
v, previous_filter_state.timestep - initial_step),
x if mask is None else (x, mask)))
return new_filter_state._replace(log_marginal_likelihood=(
previous_filter_state.log_marginal_likelihood + # Total accumulated.
new_filter_state.log_marginal_likelihood)) # Increment from this step.
return accumulating_loop_body
def linear_gaussian_update(
prior_mean, prior_cov, observation_matrix, observation_noise, x_observed):
"""Conjugate update for a linear Gaussian model.
Given a normal prior on a latent variable `z`,
`p(z) = N(prior_mean, prior_cov) = N(u, P)`,
for which we observe a linear Gaussian transformation `x`,
`p(x|z) = N(H * z + c, R)`,
the posterior is also normal:
`p(z|x) = N(u*, P*)`.
We can write this update as
x_expected = H * u + c # pushforward prior mean
S = R + H * P * H' # pushforward prior cov
K = P * H' * S^{-1} # optimal Kalman gain
u* = u + K * (x_observed - x_expected) # posterior mean
    P* = (I - K * H) * P * (I - K * H)' + K * R * K'  # posterior cov
(see, e.g., https://en.wikipedia.org/wiki/Kalman_filter#Update)
Args:
prior_mean: `Tensor` with event shape `[latent_size, 1]` and
potential batch shape `B = [b1, ..., b_n]`.
prior_cov: `Tensor` with event shape `[latent_size, latent_size]`
and batch shape `B` (matching `prior_mean`).
observation_matrix: `LinearOperator` with shape
`[observation_size, latent_size]` and batch shape broadcastable
to `B`.
observation_noise: potentially-batched
`MultivariateNormalLinearOperator` instance with event shape
`[observation_size]` and batch shape broadcastable to `B`.
x_observed: potentially batched `Tensor` with event shape
`[observation_size, 1]` and batch shape `B`.
Returns:
posterior_mean: `Tensor` with event shape `[latent_size, 1]` and
batch shape `B`.
posterior_cov: `Tensor` with event shape `[latent_size,
latent_size]` and batch shape `B`.
    predictive_dist: the prior predictive distribution `p(x)`,
as a `Distribution` instance with event
shape `[observation_size]` and batch shape `B`. This will
typically be `tfd.MultivariateNormalTriL`, but when
`observation_size=1` we return a `tfd.Independent(tfd.Normal)`
instance as an optimization.
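
  For example, a scalar-observation sketch with illustrative values
  (assuming `tfd = tfp.distributions`):

  ```
  posterior_mean, posterior_cov, predictive_dist = linear_gaussian_update(
      prior_mean=tf.constant([[0.0]]),
      prior_cov=tf.constant([[1.0]]),
      observation_matrix=tf.linalg.LinearOperatorFullMatrix([[1.0]]),
      observation_noise=tfd.MultivariateNormalDiag(loc=[0.], scale_diag=[0.5]),
      x_observed=tf.constant([[0.5]]))
  # S = 1.0 + 0.5**2 = 1.25 and K = 1.0 / 1.25 = 0.8, so
  # posterior_mean = 0.0 + 0.8 * (0.5 - 0.0) = [[0.4]],
  # posterior_cov = (1 - 0.8)**2 * 1.0 + 0.8**2 * 0.25 = [[0.2]].
  ```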
"""
# If observations are scalar, we can avoid some matrix ops.
observation_size_is_static_and_scalar = (observation_matrix.shape[-2] == 1)
# Push the predicted mean for the latent state through the
# observation model
x_expected = _propagate_mean(prior_mean,
observation_matrix,
observation_noise)
# Push the predictive covariance of the latent state through the
# observation model:
# S = R + H * P * H'.
# We use a temporary variable for H * P,
# reused below to compute Kalman gain.
tmp_obs_cov = observation_matrix.matmul(prior_cov)
predicted_obs_cov = (
observation_matrix.matmul(tmp_obs_cov, adjoint_arg=True)
+ observation_noise.covariance())
  # Compute the optimal Kalman gain:
  #   K = P * H' * S^{-1}
  # Since both S and P are covariance matrices, hence symmetric, we can
  # take the transpose and reuse our previous computation:
  #   = (S^{-1} * H * P)'
  #   = (S^{-1} * tmp_obs_cov)'
  #   = (S \ tmp_obs_cov)'
if observation_size_is_static_and_scalar:
gain_transpose = tmp_obs_cov / predicted_obs_cov
else:
predicted_obs_cov_chol = tf.linalg.cholesky(predicted_obs_cov)
gain_transpose = tf.linalg.cholesky_solve(predicted_obs_cov_chol,
tmp_obs_cov)
# Compute the posterior mean, incorporating the observation.
# u* = u + K (x_observed - x_expected)
posterior_mean = (prior_mean +
tf.linalg.matmul(gain_transpose, x_observed - x_expected,
adjoint_a=True))
# For the posterior covariance, we could use the simple update
# P* = P - K * H * P
# but this is prone to numerical issues because it subtracts a
# value from a PSD matrix. We choose instead to use the more
# expensive Jordan form update
# P* = (I - K H) * P * (I - K H)' + K R K'
# which always produces a PSD result. This uses
# tmp_term = (I - K * H)'
# as an intermediate quantity.
tmp_term = -observation_matrix.matmul(gain_transpose, adjoint=True) # -K * H
tmp_term = tf.linalg.set_diag(tmp_term, tf.linalg.diag_part(tmp_term) + 1)
posterior_cov = (
tf.linalg.matmul(
tmp_term, tf.linalg.matmul(prior_cov, tmp_term), adjoint_a=True)
+ tf.linalg.matmul(gain_transpose,
tf.linalg.matmul(
observation_noise.covariance(), gain_transpose),
adjoint_a=True))
if observation_size_is_static_and_scalar:
# A plain Normal would have event shape `[]`; wrapping with Independent
# ensures `event_shape=[1]` as required.
predictive_dist = independent.Independent(
normal.Normal(loc=x_expected[..., 0],
scale=tf.sqrt(predicted_obs_cov[..., 0])),
reinterpreted_batch_ndims=1)
else:
predictive_dist = mvn_tril.MultivariateNormalTriL(
loc=x_expected[..., 0],
scale_tril=predicted_obs_cov_chol)
return posterior_mean, posterior_cov, predictive_dist
def kalman_transition(filtered_mean, filtered_cov,
transition_matrix, transition_noise):
"""Propagate a filtered distribution through a transition model."""
predicted_mean = _propagate_mean(filtered_mean,
transition_matrix,
transition_noise)
predicted_cov = _propagate_cov(filtered_cov,
transition_matrix,
transition_noise)
return predicted_mean, predicted_cov
def build_kalman_mean_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable that performs one step of Kalman mean recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
kalman_mean_step: a callable that computes latent state and
observation means at time `t`, given latent mean at time `t-1`.
"""
def mean_step(previous_means, t):
"""Single step of prior mean recursion."""
previous_latent_mean, _ = previous_means
latent_mean = _propagate_mean(previous_latent_mean,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_mean = _propagate_mean(latent_mean,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_mean, observation_mean)
return mean_step
def build_kalman_cov_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable for one step of Kalman covariance recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
cov_step: a callable that computes latent state and observation
covariance at time `t`, given latent covariance at time `t-1`.
"""
def cov_step(previous_covs, t):
"""Single step of prior covariance recursion."""
previous_latent_cov, _ = previous_covs
latent_cov = _propagate_cov(
previous_latent_cov,
get_transition_matrix_for_timestep(t - 1),
get_transition_noise_for_timestep(t - 1))
observation_cov = _propagate_cov(
latent_cov,
get_observation_matrix_for_timestep(t),
get_observation_noise_for_timestep(t))
return (latent_cov, observation_cov)
return cov_step
def build_kalman_sample_step(get_transition_matrix_for_timestep,
get_transition_noise_for_timestep,
get_observation_matrix_for_timestep,
get_observation_noise_for_timestep,
full_sample_and_batch_shape,
validate_args=False):
"""Build a callable for one step of Kalman sampling recursion.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
get_transition_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[latent_size]`.
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
full_sample_and_batch_shape: Desired sample and batch shape of the
returned samples, concatenated in a single `Tensor`.
validate_args: if True, perform error checking at runtime.
Returns:
sample_step: a callable that samples the latent state and
observation at time `t`, given latent state at time `t-1`.
"""
def sample_step(sampled_prev, t):
"""Sample values for a single timestep."""
latent_prev, _, seed = sampled_prev
(transition_noise_seed,
observation_noise_seed,
next_seed) = samplers.split_seed(seed, n=3)
transition_matrix = get_transition_matrix_for_timestep(t - 1)
transition_noise = get_transition_noise_for_timestep(t - 1)
latent_pred = transition_matrix.matmul(latent_prev)
latent_sampled = latent_pred + transition_noise.sample(
sample_shape=_augment_sample_shape(
transition_noise,
full_sample_and_batch_shape,
validate_args),
seed=transition_noise_seed)[..., tf.newaxis]
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_pred = observation_matrix.matmul(latent_sampled)
observation_sampled = observation_pred + observation_noise.sample(
sample_shape=_augment_sample_shape(
observation_noise,
full_sample_and_batch_shape,
validate_args),
seed=observation_noise_seed)[..., tf.newaxis]
return (latent_sampled, observation_sampled, next_seed)
return sample_step
def build_pushforward_latents_step(get_observation_matrix_for_timestep,
get_observation_noise_for_timestep):
"""Build a callable to push latent means/covs to observed means/covs.
Args:
get_observation_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
      of shape `[observation_size, latent_size]`.
get_observation_noise_for_timestep: callable taking a timestep as
an integer `Tensor` argument, and returning a
`MultivariateNormalLinearOperator` of event shape
`[observation_size]`.
Returns:
pushforward_latents_step: a callable that computes the observation mean and
covariance at time `t`, given latent mean and covariance at time `t`.
"""
def pushforward_latents_step(t, latent_mean, latent_cov):
"""Loop body fn to pushforward latents to observations at a time step."""
observation_matrix = get_observation_matrix_for_timestep(t)
observation_noise = get_observation_noise_for_timestep(t)
observation_mean = _propagate_mean(latent_mean,
observation_matrix,
observation_noise)
observation_cov = _propagate_cov(latent_cov,
observation_matrix,
observation_noise)
return (observation_mean, observation_cov)
return pushforward_latents_step
def _propagate_mean(mean, linop, dist):
"""Propagate a mean through linear Gaussian transformation."""
return linop.matmul(mean) + dist.mean()[..., tf.newaxis]
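# Example (illustrative): for linop A = [[0.9]], mean = [[1.0]], and `dist`
# with mean [0.5], this returns [[0.9 * 1.0 + 0.5]] = [[1.4]].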
def _propagate_cov(cov, linop, dist):
"""Propagate covariance through linear Gaussian transformation."""
# For linop A and input cov P, returns `A P A' + dist.cov()`
return linop.matmul(linop.matmul(cov), adjoint_arg=True) + dist.covariance()
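# Example (illustrative): for linop A = [[0.9]], cov = [[1.0]], and `dist`
# with covariance [[0.25]], this returns [[0.9 * 1.0 * 0.9 + 0.25]] = [[1.06]].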
def _get_covariance_no_broadcast(dist):
"""Returns `dist.covariance()` ignoring any batch shape from `dist.loc`."""
if hasattr(dist, 'reinterpreted_batch_ndims'):
# Dist is Independent(Normal).
return tf.linalg.diag(dist.distribution.scale ** 2)
elif hasattr(dist, 'cov_operator'):
# Dist is MultivariateNormalLowRankUpdateLinearOperatorCovariance.
return dist.cov_operator.to_dense()
elif hasattr(dist, 'scale') and hasattr(dist.scale, 'matmul'):
# Dist is MultivariateNormalLinearOperator.
return dist.scale.matmul(dist.scale, adjoint_arg=True).to_dense()
raise ValueError(
'Could not compute unbroadcast covariance of distribution {}.'.format(
dist))
|
tensorflow/probability
|
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
|
Python
|
apache-2.0
| 92,122
|
[
"Gaussian"
] |
2f780b6a5f3f7a90339cca3749b273a335e3d6ecebfde71a697ecd27d01b9e0c
|
#!/usr/bin/python
from subprocess import call
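# Protocol sketch: index the reference with BWA and Stampy if missing, align
# paired-end reads with `bwa aln`/`bwa sampe`, then remap with Stampy at a
# high substitution rate, keeping good BWA alignments.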
ref = raw_input("FASTA reference file: ")
lib1 = raw_input("FASTQ read library 1 file 1: ")
lib2 = raw_input("FASTQ read library 2 file 1: ")
threads = raw_input("number of threads: ")
try:
open(ref+".pac")
open(ref+".ann")
open(ref+".amb")
open(ref+".bwt")
open(ref+".sa")
except IOError:
    call("bwa index -a bwtsw %s" % ref, shell=True)
    call("stampy.py --species=lmig --assembly=1 -G lmig1 %s" % ref, shell=True)
    call("stampy.py -g lmig1 -H lmig1", shell=True)
call("bwa aln -t%s %s %s > read1.sai" % (threads, ref, lib1), shell=True)
call("bwa aln -t%s %s %s > read2.sai" % (threads, ref, lib2), shell=True)
call("bwa sampe -s -r \042@RG\tID:1\tLB:1\tSM:1\042 %s read1.sai read2.sai %s %s | samtools view -bS - > align_fastq.bam" % (ref, lib1, lib2), shell=True)
call("stampy.py -g lmig1 -h lmig1 -t %s --substitutionrate=0.10 --bamkeepgoodreads -M align_fastq.bam > mapping.sam" % threads, shell=True)
#call("samtools sort align_fastq.bam align_sort", shell=True)
#call("samtools index align_sort.bam", shell=True)
#call("samtools flagstat align_sort.bam > align_sort.flagstat", shell=True)
|
fjruizruano/ngs-protocols
|
stampy_protocol.py
|
Python
|
gpl-3.0
| 1,138
|
[
"BWA"
] |
ef3d8d95149983219af7300e59f4cf024dea0e7b5710e3a3c6a4b838d3f5306d
|
import matplotlib.pyplot as plt
import numpy as np
import mpl_toolkits.axisartist as AA
from mpl_toolkits.axes_grid1 import host_subplot
from pprint import pprint
from matplotlib import rcParams
from matplotlib.mlab import psd
from mpl_toolkits.mplot3d import Axes3D
# `Periodogram` (used in `power_spectrum` below) comes from the `spectrum`
# package: https://pypi.org/project/spectrum/
from spectrum import Periodogram
rcParams['xtick.direction'] = 'in'
rcParams['ytick.direction'] = 'in'
rcParams['ytick.major.pad'] = 12
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
box = dict(facecolor=None, pad=15, alpha=0)
def angle_plot(one, two=None):
    if two is None:
        two = one
    # Cosine of the angle between corresponding columns.
    angles = np.array([np.inner(first, second) /
                       np.sqrt(np.inner(first, first) * np.inner(second, second))
                       for first, second in zip(one.transpose(), two.transpose())])
    print(angles)
def adjust_spines(ax,spines=['bottom','left']):
''' Taken from http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html '''
    for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward',10))
#spine.set_smart_bounds(True) #Doesn't work for log log plots
spine.set_linewidth(1)
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
def simple_raster(spiketimes,ax,color='k'):
    for neuron, times in spiketimes.items():
ax.vlines(times,neuron+0.5,neuron+1.5,color=color)
def power_spectrum(data,Fs=20000, savename=None,show=True, cutoff=50):
p = Periodogram(data,sampling=Fs)
p.run()
p.plot()
'''
#stop = np.where(freqs>cutoff)[0][0]
#print stop
fig = plt.figure()
ax = fig.add_subplot(111)
spec, = ax.plot(freqs,db,'o-')
adjust_spines(ax,['bottom','left'])
ax.set_xlabel(r'frequency $\left(Hz\right)$')
ax.set_ylabel(r'Power $\left(dB\right)$')
'''
if show:
plt.show()
if savename:
plt.savefig(savename,dpi=72)
def scree_plot(eigVals,cutoff=0.95,savename=None, show=False,save=True,savebase=None):
#Assume the list is all of the eigenvalues
rel = np.cumsum(eigVals)/eigVals.sum()
x = np.arange(len(rel))+1
    print(eigVals.shape)
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot(x,rel)
line.set_clip_on(False)
adjust_spines(ax,['bottom','left'])
ax.set_xlabel(r'$\LARGE \lambda$')
ax.set_ylabel('Fraction of variance')
ax.set_xlim(0,len(eigVals))
cutoff_idx = np.where(rel>cutoff)[0][0]
ax.axvline(x=cutoff_idx, color='r',linestyle='--', linewidth=2)
ax.axhline(y=rel[cutoff_idx],color='r',linestyle='--',linewidth=2)
ax.tick_params(direction='in')
ax.annotate(r" {\Large $\mathbf{\lambda=%d}$}" % cutoff_idx,xy=(.25, .9), xycoords='axes fraction',
horizontalalignment='center', verticalalignment='center')
plt.tight_layout()
if save:
        print(savebase)
plt.savefig(savebase+'_scree.png',dpi=100)
if show:
plt.show()
plt.close()
def spike_validation(data, clusters, spiketimes=None, eiglist=None, nclus=None,
                     savebase='res', waveforms=None, multi=False, show=False,
                     save=True, adj=False):
best = clusters['models'][np.argmax(clusters['silhouettes'])]
nclus = best.n_clusters if not nclus else nclus
fig = plt.figure()
plt.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=.97)
#Clusters of waveforms projected onto the first two principal components
ax = fig.add_subplot(2,2,1, projection='3d')
ax.set_axis_bgcolor('white')
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
labels_ = best.labels_
centers = best.cluster_centers_
unique_labels = np.unique(labels_)
for n,col in zip(range(nclus),colors):
my_members = labels_ == n
cluster_center = centers[n]
ax.scatter(data[0,my_members],data[1,my_members],data[2,my_members],c=col, s=6)
plt.hold(True)
ax.scatter(cluster_center[0],cluster_center[1],cluster_center[2],c=col,s=8)
#adjust_spines(ax,['bottom','left'])
ax.set_ylabel(r'\Large \textbf{PC2}')
ax.set_xlabel(r'\Large \textbf{PC1}')
ax.set_zlabel(r'\Large \textbf{PC3}')
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_zticklabels('')
plt.tight_layout()
if waveforms is not None:
        print('drawing wfs')
wfs = fig.add_subplot(2,2,3)#axes([0.37, 0.65, 0.1, 0.15])
wfs.set_axis_bgcolor('none')
artists = []
for n,col in zip(range(nclus),colors):
#my_members = labels_[:-300]== n
my_members = labels_ == n
            print(len(my_members))
            print(waveforms.shape)
line, = wfs.plot(np.average(waveforms[my_members,:],axis=0),col,linewidth=2)
line.set_clip_on(False)
adjust_spines(wfs,['bottom','left'])
if not adj:
wfs.set_yticks([0,100])
wfs.set_yticklabels([r'$0$', r'$100 \; \mu V$'],rotation='vertical')
wfs.set_xticks([0,100])
wfs.set_xticklabels([r'$0$',r'$800 \; \mu s$'])
wfs.spines['bottom'].set_bounds(0,100)
else:
wfs.set_yticks([-1000,0,1000])
wfs.set_yticklabels([r'$-100 \; \mu V$',r'$0$', r'$100 \; \mu V$'],rotation='vertical')
wfs.set_xticks([0,16,32])
wfs.set_xticklabels([r'$0$',r'$400 \; \mu s$',r'$800 \; \mu s$'])
wfs.spines['bottom'].set_bounds(0,32)
sils = fig.add_subplot(2,2,2)
sils.set_axis_bgcolor('none')
markerline, stemlines,baseline =sils.stem(np.arange(len(clusters['silhouettes'])),clusters['silhouettes'])
sils.tick_params(direction='in')
#sils.axhline(y=0.5,color='r',linestyle='--',linewidth=2)
adjust_spines(sils,['bottom','left'])
sils.set_xticks(np.arange(len(clusters['silhouettes']))+1)
sils.set_yticks([-1,0,1])
sils.set_ylabel('Silhouette coefficient')
sils.set_xlabel('Number of clusters')
sils.set_xlim((0.5,len(clusters['silhouettes'])))
xmx=100
if spiketimes is not None:
#break of up the spiketime vector based on clustering
short_isi = fig.add_axes([0.8, 0.26, 0.15, 0.20])
isi = fig.add_subplot(2,2,4)
for n,col in zip(range(nclus),colors):
#my_members = labels_[:-300]== n #Always add 3000 noise spikes
my_members = labels_ == n
these_isis = 0.1*np.diff(spiketimes[my_members])
these_isis = these_isis[these_isis>1]
if these_isis.size:
_,_,patches=isi.hist(these_isis, histtype='stepfilled', range=(0,1000),
alpha=0.5, bins=50)
adjust_spines(isi,['bottom','left'])
plt.setp(patches,'facecolor',col)
_,_,spatches=short_isi.hist(these_isis,range=(0,100), histtype='stepfilled')
plt.setp(spatches,'facecolor',col)
isi.tick_params(direction='in')
isi.set_axis_bgcolor('none')
isi.set_ylabel(r'\# of spikes')
isi.set_xlabel(r'Interspike interval $(ms)$')
isi.set_xlim(xmax=xmx)
short_isi.set_axis_bgcolor('none')
adjust_spines(short_isi,['bottom','left'])
short_isi.tick_params(direction='in')
short_isi.set_ylabel(r'\# of Spikes')
#short_isi.set_yticks(np.arange(8))
short_isi.axvline(x=2,c='r',linewidth=2)
short_isi.set_xlabel(r'ISI $(ms)$')
short_isi.set_xticklabels(np.arange(0,12)[::2])
if eiglist is not None and eiglist.size:
eigfxns = fig.add_subplot(2,2,3)
eigfxns.set_axis_bgcolor('none')
eigfxns.tick_params(direction='in')
#Assume 6 eigenfunctions
nfxns =6
        span = len(eiglist[0, :]) // 2
        print(span)
        x = np.arange(2 * span) if multi else np.arange(-span, span)
for i in range(nfxns):
eigfxns.plot(x,i+eiglist[i,:],'b',linewidth=2)
plt.hold(True)
adjust_spines(eigfxns,['bottom','left'])
if multi:
eigfxns.set_xlabel(r' $\left(\mu sec\right)$')
else:
eigfxns.set_xlabel(r'Time from spike peak $\left(\mu sec\right)$')
eigfxns.set_xticklabels([r'\textbf{%d}'%(32*(i-5)) for i in range(10)])
eigfxns.set_yticklabels([' '] + [r' $e_{%d}$' %i for i in range(1,nfxns+1) ])
eigfxns.set_ylabel(r'Eigenfunctions')
#draw_sizebar(eigfxns)
plt.tight_layout()
plt.savefig(savebase+'_validation.png', bbox_inches='tight')
if show:
plt.show()
def voltage_trace(unfiltered=None,filtered=None,threshold = 0, roi=30000,spread=10000,save=None,
show=False, fs = 20000, downsampling= 10,savebase=None):
fig = plt.figure()
trace_panel = fig.add_subplot(211,axisbg='none')
start = roi-spread
stop = roi+spread
traces, = trace_panel.plot(unfiltered[start:stop][::downsampling],'b') #Downsample just for display
spike_panel = fig.add_subplot(212,axisbg='none',sharex=trace_panel)
spikes, = spike_panel.plot(filtered[start:stop][::downsampling],'b')
panels = [trace_panel,spike_panel]
for panel in panels:
adjust_spines(panel,['bottom','left'])
trace_panel.set_xlabel(r'time $\left(s\right)$')
trace_panel.set_ylabel(r'voltage $ \left(\mu V \right)$')
trace_panel.set_xticklabels(np.arange(start/fs,1.5+stop/fs,0.5).astype(str))
spike_panel.set_xlabel(r'time $\left(s\right)$')
spike_panel.set_xticklabels(np.arange(start/fs,1.5+stop/fs,0.5).astype(str))
spike_panel.set_ylabel(r'voltage $\left(\mu V \right)$')
#Draw threshold
spike_panel.axhline(y=threshold,linewidth=1,color='r',linestyle='--')
spike_panel.axhline(y=-threshold,linewidth=1,color='r',linestyle='--')
plt.tight_layout()
if save:
        print(savebase)
plt.savefig(savebase+'_voltage.png',dpi=100)
if show:
plt.show()
def ccf():
    # NOTE: as in the original, this relies on `filenames`, `ccfs`, `w`, and
    # `tech` being defined at module scope by the caller.
    print('Calculated')
    rowL = len(filenames)
    colL = rowL
    acf_panel, ax = plt.subplots(rowL, colL, sharex=True, sharey=True)
    # Should use absolute, not relative, normalization.
    # Currently uses absolute normalization.
    for j in range(rowL):
        for i in range(colL):
            line, = ax[i, j].plot(np.arange(-w, w), ccfs[i + j], linewidth=2)
            line.set_clip_on(False)
            ax[i, j].axvline(x=0, color='r', linestyle='--', linewidth=2)
            adjust_spines(ax[i, j], ['bottom', 'left'])
            ax[i, j].spines['left'].set_smart_bounds(True)
            ax[i, j].spines['bottom'].set_smart_bounds(True)
            ax[i, j].set_ylabel('Covariance')
            ax[i, j].set_xlabel(r'Time $\left(ms\right)$')
            ax[i, j].set_axis_bgcolor('none')
            ax[i, j].tick_params(direction='in')
            ax[i, j].locator_params(nbins=int(60 / w))
            ax[i, j].annotate(r" {\Large $\mathbf{%s,%s}$}" % (tech.get_channel_id(filenames[i]),
                                                               tech.get_channel_id(filenames[j])),
                              xy=(.2, .8), xycoords='axes fraction',
                              horizontalalignment='center', verticalalignment='center')
    plt.tight_layout()
    plt.savefig('test_ccf.png')
format = lambda text: r'\huge \textbf{\textsc{%s}}'%text if '$' not in text else r'\Large %s'%text
def biplot(data):
import rpy2.robjects as robjects
def raster_plot(data, duration=200, parasite_labels=['','STN','GPi'],filename='test.png',
break_pattern_ratio=0.1,axes_labels=None):
fig = plt.figure()
ax = fig.add_subplot(111)
length = float(len([label for label in parasite_labels if 'interpattern' not in label])+1)
cax = ax.imshow(data,interpolation='nearest',aspect='auto',cmap=plt.cm.binary)
for i,condition in enumerate(parasite_labels):
ax.annotate(format(condition), xy=(1.05/length*i+(1+break_pattern_ratio)/length, 0.97),
xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top')
adjust_spines(ax)
ax.yaxis.grid(True)
ax.set_yticks(range(len(axes_labels) if axes_labels else 6))
ax.set_yticklabels(map(format,axes_labels if axes_labels else ['Cortex','Striatum','GPi','STN', 'GPe','Thalamus']))
ax.set_xlabel(format('Time (arbitrary units)'))
for demarcation in np.cumsum(duration):
ax.axvline(x=demarcation,color='r',linestyle='--', linewidth=2)
ax.set_xlim(xmin=0)
ax.xaxis.labelpad=20
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig(filename if '.' in filename else '%s.png'%filename,dpi=400)
def firing_rate_plot(data, duration=200, parasite_labels=['Background','STN','GPi'],
filename='test_rate.png',break_pattern_ratio=0.1):
fig,axs = plt.subplots(ncols=1,nrows=data.shape[0],sharex=True)
offset = 1.5
length = float(len([label for label in parasite_labels if 'interpattern' not in label])+1)
rates =[1/50.*np.sum(np.reshape(0.5*(1+row),(-1,50)),axis=1) for row in data]
    for_bar_graphs = np.array([[np.average(part)
                                for part in np.array_split(0.5 * (1 + row), 3)]
                               for row in data])
pprint(for_bar_graphs)
for ax,rate,label in zip(axs,rates,map(lambda text: r'\huge \textbf{%s}'%text,
['Cortex','Striatum','GPi/SNr','STN', 'GPe','Thalamus'][::-1])):
ax.plot(rate,'k')
adjust_spines(ax)
for i,condition in enumerate(parasite_labels):
ax.annotate(format(condition.capitalize()), xy=(1/length*i+(1+break_pattern_ratio)/length, 0.97),
xycoords='figure fraction',
horizontalalignment='left', verticalalignment='top')
ax.set_ylabel(label,rotation='horizontal',bbox=box)
ax.set_yticks([])
for demarcation in np.cumsum(duration)/50.:
ax.axvline(x=demarcation,color='r',linestyle='--', linewidth=2)
ax.set_xlim(xmin=0)
axs[-1].set_xlabel(format('Time (arbitrary units)'))
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.savefig(filename if '.' in filename else '%s.png'%filename,dpi=400)
|
mac389/deep-brain-stimulation
|
src/Graphics.py
|
Python
|
apache-2.0
| 12,668
|
[
"NEURON"
] |
24496952d9ce4798de3420cb5369dcf90d7891187b5ac6551cef01f8ab647fe7
|
#!/usr/bin/env python
import glob
import numpy as np
try:
from setuptools import setup
have_setuptools = True
except ImportError:
from distutils.core import setup
have_setuptools = False
try:
from Cython.Build import cythonize
have_cython = True
except ImportError:
have_cython = False
kwargs = {'name': 'openmc',
'version': '0.8.0',
'packages': ['openmc', 'openmc.data', 'openmc.mgxs', 'openmc.model',
'openmc.stats'],
'scripts': glob.glob('scripts/openmc-*'),
# Metadata
'author': 'Will Boyd',
'author_email': 'wbinventor@gmail.com',
'description': 'OpenMC Python API',
'url': 'https://github.com/mit-crpg/openmc',
'classifiers': [
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'
]}
if have_setuptools:
kwargs.update({
# Required dependencies
'install_requires': ['six', 'numpy>=1.9', 'h5py', 'matplotlib'],
# Optional dependencies
'extras_require': {
'pandas': ['pandas>=0.17.0'],
'sparse' : ['scipy'],
'vtk': ['vtk', 'silomesh'],
'validate': ['lxml']
},
# Data files
'package_data': {
'openmc.data': ['mass.mas12', 'fission_Q_data_endfb71.h5']
},
})
# If Cython is present, add resonance reconstruction capability
if have_cython:
kwargs.update({
'ext_modules': cythonize('openmc/data/reconstruct.pyx'),
'include_dirs': [np.get_include()]
})
setup(**kwargs)
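# Usage note (illustrative, not part of the original script): with setuptools
# available, the optional extras declared above can be installed with pip, e.g.
#     pip install .[pandas,validate]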
|
samuelshaner/openmc
|
setup.py
|
Python
|
mit
| 1,886
|
[
"VTK"
] |
0deae2efbb7bc4213faf176f9d67e5156d37740acb4ff7195976b6cd48d37276
|
"""Unit tests for the Improve CV mail module."""
import unittest
from bob_emploi.frontend.api import user_pb2
from bob_emploi.frontend.server.mail.test import campaign_helper
class PrepareYourApplicationTest(campaign_helper.CampaignTestBase):
"""Unit tests for the _get_prepare_your_application_vars method."""
campaign_id = 'prepare-your-application'
def setUp(self) -> None:
super().setUp()
self.user.profile.gender = user_pb2.MASCULINE
self.user.profile.name = 'Patrick'
self.user.profile.coaching_email_frequency = user_pb2.EMAIL_ONCE_A_MONTH
def test_basic_mail_blast(self) -> None:
"""Basic usage of a mail blast."""
self._assert_user_receives_campaign()
def test_basic_focus(self) -> None:
"""Basic usage of focus."""
self._assert_user_receives_focus()
def test_not_frustrated(self) -> None:
"""Not frustrated."""
del self.user.profile.frustrations[:]
del self.project.advices[:]
self._assert_user_receives_focus()
self._assert_has_default_vars()
self._assert_has_unsubscribe_url('changeEmailSettingsUrl', **{
'coachingEmailFrequency': 'EMAIL_ONCE_A_MONTH',
})
self._assert_has_status_update_link('statusUpdateUrl')
self.assertFalse(self._variables.pop('hasInterviewFrustration'))
self.assertFalse(self._variables.pop('hasSelfConfidenceFrustration'))
self._assert_has_logged_url('loginUrl', '/projet/0')
self._assert_remaining_variables({
'deepLinkMotivationEmailUrl': '',
})
def test_frustrated(self) -> None:
"""Frustrated about everything."""
self.user.profile.frustrations.append(user_pb2.SELF_CONFIDENCE)
self.user.profile.frustrations.append(user_pb2.INTERVIEW)
self.project.advices.add(advice_id='motivation-email')
self._assert_user_receives_focus()
self._assert_has_default_vars()
self._assert_has_unsubscribe_url('changeEmailSettingsUrl', **{
'coachingEmailFrequency': 'EMAIL_ONCE_A_MONTH',
})
self._assert_has_status_update_link('statusUpdateUrl')
self.assertEqual('True', self._variables.pop('hasInterviewFrustration'))
self.assertEqual('True', self._variables.pop('hasSelfConfidenceFrustration'))
self._assert_has_logged_url('loginUrl', '/projet/0')
self._assert_has_logged_url(
'deepLinkMotivationEmailUrl',
'/projet/0/methode/motivation-email'
)
self._assert_remaining_variables({})
if __name__ == '__main__':
unittest.main()
|
bayesimpact/bob-emploi
|
frontend/server/mail/test/prepare_your_application_test.py
|
Python
|
gpl-3.0
| 2,649
|
[
"BLAST"
] |
f8a4b91a57f390011aef758d298b015b7a6f214d5f46101d7d280bd8714a3fa4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abinit Post Process Application
author: Martin Alexandre
last edited: May 2013
"""
import sys,os,time,commands
import string, math
#GUI
import gui.graph as Graph
import gui.conv as Conv
#Utility
import utility.write as Write
import utility.analysis as Analysis
try:
from PyQt4 import Qt,QtGui,QtCore
except ImportError:
    pass
from numpy import *
#----------------------------------------------------------------#
#--------------WINDOW - MEAN SQUARED DISPLACEMENT----------------#
#----------------------------------------------------------------#
class winMSD(QtGui.QWidget):
PTOE = Analysis.PeriodicTableElement()
def __init__(self, file, parent = None,name =''):
self.file = file
self.name = name
self.initUI(parent)
self.displayGraph()
self.raise_()
def initUI(self, parent):
#-----------------Creation of the windows----------------------------#
QtGui.QWidget.__init__(self, parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle(self.name + ' MSD option')
self.setFixedSize(200, 150)
self.center()
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
self.lbl1 = QtGui.QLabel(" Atom type 1 :", self)
self.lbl1.setFixedWidth(95)
self.CBox1 = QtGui.QComboBox()
self.CBox1.setFixedWidth(70)
for i in range(len(self.file.getZnucl())):
self.CBox1.addItem(str(self.PTOE.getName(self.file.getZnucl()[i])))
self.connect(self.CBox1,QtCore.SIGNAL('currentIndexChanged(const QString&)'),self.displayGraph)
self.pbClose = QtGui.QPushButton("close")
self.pbClose.setFixedSize(70,20)
self.connect(self.pbClose,QtCore.SIGNAL("clicked()"),QtCore.SLOT('close()'))
self.layout.addWidget(self.lbl1 , 1, 0, 1, 1, QtCore.Qt.AlignRight)
self.layout.addWidget(self.CBox1 , 1, 1, 1, 1, QtCore.Qt.AlignCenter)
self.layout.addWidget(self.pbClose , 7, 0, 1, 2, QtCore.Qt.AlignCenter)
self.show()
#------------------------------------------------------------------------#
def displayGraph(self):
atom = self.CBox1.currentIndex() + 1
self.MeanSquaredDeplacement = Analysis.MSD(self.file,atom)
x = self.MeanSquaredDeplacement.getX()
y = self.MeanSquaredDeplacement.getMSD()
try:
self.graphMSD.update(x,y,'step', "Mean squared deplacement",name = self.name)
self.graphMSD.addPlot(x,linspace(1,1,len(x)))
self.graphMSD.show()
except:
self.graphMSD = Graph.graphic(x,y,'step', "Mean squared deplacement", average=False,name = self.name)
self.connect(self.graphMSD, QtCore.SIGNAL("myCustomizedSignal()"), self.close)
self.graphMSD.show()
def update(self,pfile):
self.file = pfile
atom = self.CBox1.currentIndex() + 1
try:
self.MeanSquaredDeplacement = Analysis.MSD(self.file,atom)
x = self.MeanSquaredDeplacement.getX()
y = self.MeanSquaredDeplacement.getMSD()
self.graphMSD.update(x,y,'step', "Mean squared deplacement",name = self.name)
self.graphMSD.addPlot(x,linspace(1,1,len(x)))
except:
pass
def close(self):
del self.graphMSD
del self
def closeEvent(self, event):
try:
del self.graphMSD
except:
pass
try:
del self
except:
pass
def center(self):
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
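# Typical usage (illustrative assumption): instantiated from the APPA GUI with
# an open output-file wrapper, e.g.
#     win = winMSD(output_file, name='run1')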
|
jmbeuken/abinit
|
scripts/post_processing/appa/gui/msd.py
|
Python
|
gpl-3.0
| 3,840
|
[
"ABINIT"
] |
608e06adce32629d78bd4fa3000caec786df25622fdc454303148a092eec6764
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode.testcase import FileBasedTesting
from licensedcode import legal
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class TestSpecialFiles(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_license_special_files(self):
tests = [
('legal/COPYING', 'yes'),
('legal/Copyrights', 'maybe'),
('legal/LICENSE', 'yes'),
('legal/Notice', 'yes'),
('legal/no_license_in_here.java', 'maybe'),
('legal/noticE.html', 'yes'),
('legal/useless_notice.txt', 'maybe')
]
for tf, expected in tests:
assert expected == legal.is_special_legal_file(self.get_test_loc(tf))
|
yasharmaster/scancode-toolkit
|
tests/licensedcode/test_legal.py
|
Python
|
apache-2.0
| 2,142
|
[
"VisIt"
] |
c8bfecdd13437e65ad2e9a94c04e9dc32ad2424471749713b32f28e06357e0c8
|
"""
Acceptance tests for the Import and Export pages
"""
from nose.plugins.attrib import attr
from datetime import datetime
from flaky import flaky
from abc import abstractmethod
from common.test.acceptance.tests.studio.base_studio_test import StudioLibraryTest, StudioCourseTest
from common.test.acceptance.pages.studio.import_export import (
ExportLibraryPage,
ExportCoursePage,
ImportLibraryPage,
ImportCoursePage)
from common.test.acceptance.pages.studio.library import LibraryEditPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.staff_view import StaffPage
class ExportTestMixin(object):
"""
Tests to run both for course and library export pages.
"""
def test_export(self):
"""
Scenario: I am able to export a course or library
Given that I have a course or library
And I click the download button
The download will succeed
And the file will be of the right MIME type.
"""
good_status, is_tarball_mimetype = self.export_page.download_tarball()
self.assertTrue(good_status)
self.assertTrue(is_tarball_mimetype)
@attr(shard=7)
class TestCourseExport(ExportTestMixin, StudioCourseTest):
"""
Export tests for courses.
"""
def setUp(self): # pylint: disable=arguments-differ
super(TestCourseExport, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'], self.course_info['number'], self.course_info['run'],
)
self.export_page.visit()
def test_header(self):
"""
Scenario: I should see the correct text when exporting a course.
Given that I have a course to export from
When I visit the export page
The correct header should be shown
"""
self.assertEqual(self.export_page.header_text, 'Course Export')
@attr(shard=7)
class TestLibraryExport(ExportTestMixin, StudioLibraryTest):
"""
Export tests for libraries.
"""
def setUp(self):
"""
Ensure a library exists and navigate to the library edit page.
"""
super(TestLibraryExport, self).setUp()
self.export_page = ExportLibraryPage(self.browser, self.library_key)
self.export_page.visit()
def test_header(self):
"""
Scenario: I should see the correct text when exporting a library.
Given that I have a library to export from
When I visit the export page
The correct header should be shown
"""
self.assertEqual(self.export_page.header_text, 'Library Export')
@attr(shard=7)
class ImportTestMixin(object):
"""
Tests to run for both course and library import pages.
"""
def setUp(self):
super(ImportTestMixin, self).setUp()
self.import_page = self.import_page_class(*self.page_args())
self.landing_page = self.landing_page_class(*self.page_args())
self.import_page.visit()
@abstractmethod
def page_args(self):
"""
Generates the args for initializing a page object.
"""
return []
def test_upload(self):
"""
Scenario: I want to upload a course or library for import.
Given that I have a library or course to import into
And I have a valid .tar.gz file containing data to replace it with
I can select the file and upload it
And the page will give me confirmation that it uploaded successfully
"""
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
def test_import_timestamp(self):
"""
Scenario: I perform a course / library import
On import success, the page displays a UTC timestamp previously not visible
And if I refresh the page, the timestamp is still displayed
"""
self.assertFalse(self.import_page.is_timestamp_visible())
# Get the time when the import has started.
# import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
# keep the comparison consistent
upload_start_time = datetime.utcnow().replace(microsecond=0, second=0)
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
# Get the time when the import has finished.
# import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
# keep the comparison consistent
upload_finish_time = datetime.utcnow().replace(microsecond=0, second=0)
import_timestamp = self.import_page.parsed_timestamp
self.import_page.wait_for_timestamp_visible()
# Verify that 'import_timestamp' is between start and finish upload time
self.assertLessEqual(
upload_start_time,
import_timestamp,
"Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
)
self.assertGreaterEqual(
upload_finish_time,
import_timestamp,
"Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
)
self.import_page.visit()
self.import_page.wait_for_tasks(completed=True)
self.import_page.wait_for_timestamp_visible()
def test_landing_url(self):
"""
Scenario: When uploading a library or course, a link appears for me to view the changes.
Given that I upload a library or course
A button will appear that contains the URL to the library or course's main page
"""
self.import_page.upload_tarball(self.tarball_name)
self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url)
def test_bad_filename_error(self):
"""
Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file.
Given that I select a file that is an .mp4 for upload
An error message will appear
"""
self.import_page.upload_tarball('funny_cat_video.mp4')
self.import_page.wait_for_filename_error()
def test_task_list(self):
"""
Scenario: I should see feedback checkpoints when uploading a course or library
Given that I am on an import page
No task checkpoint list should be showing
When I upload a valid tarball
Each task in the checklist should be marked confirmed
And the task list should be visible
"""
# The task list shouldn't be visible to start.
self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.")
self.import_page.wait_for_tasks()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_tasks(completed=True)
self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.")
def test_bad_import(self):
"""
Scenario: I should see a failed checklist when uploading an invalid course or library
Given that I am on an import page
And I upload a tarball with a broken XML file
The tasks should be confirmed up until the 'Updating' task
And the 'Updating' task should be marked failed
And the remaining tasks should not be marked as started
"""
self.import_page.upload_tarball(self.bad_tarball_name)
self.import_page.wait_for_tasks(fail_on='Updating')
@attr(shard=7)
class TestEntranceExamCourseImport(ImportTestMixin, StudioCourseTest):
"""
Tests the Course import page
"""
tarball_name = 'entrance_exam_course.2015.tar.gz'
bad_tarball_name = 'bad_course.tar.gz'
import_page_class = ImportCoursePage
landing_page_class = CourseOutlinePage
def page_args(self):
return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]
@flaky # TODO fix this, see TNL-6009
def test_course_updated_with_entrance_exam(self):
"""
Given that I visit an empty course before import
I should not see a section named 'Section' or 'Entrance Exam'
When I visit the import page
And I upload a course that has an entrance exam section named 'Entrance Exam'
And I visit the course outline page again
The section named 'Entrance Exam' should now be available.
And when I switch the view mode to student view and Visit CourseWare
Then I see one section in the sidebar that is 'Entrance Exam'
"""
self.landing_page.visit()
# Should not exist yet.
self.assertRaises(IndexError, self.landing_page.section, "Section")
self.assertRaises(IndexError, self.landing_page.section, "Entrance Exam")
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.landing_page.visit()
# There should be two sections. 'Entrance Exam' and 'Section' on the landing page.
self.landing_page.section("Entrance Exam")
self.landing_page.section("Section")
self.landing_page.view_live()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
self.assertEqual(courseware.num_sections, 1)
self.assertIn(
"To access course materials, you must score", courseware.entrance_exam_message_selector.text[0]
)
@attr(shard=7)
class TestCourseImport(ImportTestMixin, StudioCourseTest):
"""
Tests the Course import page
"""
tarball_name = '2015.lzdwNM.tar.gz'
bad_tarball_name = 'bad_course.tar.gz'
import_page_class = ImportCoursePage
landing_page_class = CourseOutlinePage
def page_args(self):
return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]
@flaky # TNL-6042
def test_course_updated(self):
"""
Given that I visit an empty course before import
I should not see a section named 'Section'
When I visit the import page
And I upload a course that has a section named 'Section'
And I visit the course outline page again
The section named 'Section' should now be available
"""
self.landing_page.visit()
# Should not exist yet.
self.assertRaises(IndexError, self.landing_page.section, "Section")
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.landing_page.visit()
# There's a section named 'Section' in the tarball.
self.landing_page.section("Section")
def test_header(self):
"""
Scenario: I should see the correct text when importing a course.
Given that I have a course to import to
When I visit the import page
The correct header should be shown
"""
self.assertEqual(self.import_page.header_text, 'Course Import')
def test_multiple_course_import_message(self):
"""
Given that I visit an empty course before import
When I visit the import page
And I upload a course with file name 2015.lzdwNM.tar.gz
Then timestamp is visible after course is updated successfully
And then I create a new course
When I visit the import page of this new course
Then timestamp is not visible
"""
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.assertTrue(self.import_page.is_timestamp_visible())
# Create a new course and visit the import page
self.course_info = {
'org': 'orgX',
'number': self.unique_id + '_2',
'run': 'test_run_2',
'display_name': 'Test Course 2' + self.unique_id
}
self.install_course_fixture()
self.import_page = self.import_page_class(*self.page_args())
self.import_page.visit()
# Since this is a new course that has never been imported, no timestamp should be present
self.assertFalse(self.import_page.is_timestamp_visible())
@attr(shard=7)
class TestLibraryImport(ImportTestMixin, StudioLibraryTest):
"""
Tests the Library import page
"""
tarball_name = 'library.HhJfPD.tar.gz'
bad_tarball_name = 'bad_library.tar.gz'
import_page_class = ImportLibraryPage
landing_page_class = LibraryEditPage
def page_args(self):
return [self.browser, self.library_key]
@flaky # TODO: SOL-430
def test_library_updated(self):
"""
Given that I visit an empty library
No XBlocks should be shown
When I visit the import page
And I upload a library that contains three XBlocks
And I visit the library page
Three XBlocks should be shown
"""
self.landing_page.visit()
self.landing_page.wait_until_ready()
# No items should be in the library to start.
self.assertEqual(len(self.landing_page.xblocks), 0)
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.landing_page.visit()
self.landing_page.wait_until_ready()
# There are three blocks in the tarball.
self.assertEqual(len(self.landing_page.xblocks), 3)
def test_header(self):
"""
Scenario: I should see the correct text when importing a library.
Given that I have a library to import to
When I visit the import page
The correct header should be shown
"""
self.assertEqual(self.import_page.header_text, 'Library Import')
|
synergeticsedx/deployment-wipro
|
common/test/acceptance/tests/studio/test_import_export.py
|
Python
|
agpl-3.0
| 14,174
|
[
"VisIt"
] |
72d869f3702d3e22bd4b2d32c44c4cbc7ea1706477c42b95768219ff01a2a1da
|
# sql/elements.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`_expression.ClauseElement`,
:class:`_expression.ColumnElement`, and derived classes.
"""
from __future__ import annotations
import itertools
import operator
import re
import typing
from typing import Any
from typing import Callable
from typing import Generic
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Text as typing_Text
from typing import Type
from typing import TypeVar
from typing import Union
from . import coercions
from . import operators
from . import roles
from . import traversals
from . import type_api
from .annotation import Annotated
from .annotation import SupportsWrappingAnnotations
from .base import _clone
from .base import _generative
from .base import Executable
from .base import HasMemoized
from .base import Immutable
from .base import NO_ARG
from .base import SingletonConstant
from .cache_key import MemoizedHasCacheKey
from .cache_key import NO_CACHE
from .coercions import _document_text_coercion # noqa
from .operators import ColumnOperators
from .traversals import HasCopyInternals
from .visitors import cloned_traverse
from .visitors import InternalTraversal
from .visitors import traverse
from .visitors import Visitable
from .. import exc
from .. import inspection
from .. import util
from ..util.langhelpers import TypingOnly
if typing.TYPE_CHECKING:
from decimal import Decimal
from .operators import OperatorType
from .selectable import FromClause
from .selectable import Select
from .sqltypes import Boolean # noqa
from .type_api import TypeEngine
from ..engine import Compiled
from ..engine import Connection
from ..engine import Dialect
from ..engine import Engine
_NUMERIC = Union[complex, "Decimal"]
_T = TypeVar("_T", bound="Any")
_OPT = TypeVar("_OPT", bound="Any")
_NT = TypeVar("_NT", bound="_NUMERIC")
_ST = TypeVar("_ST", bound="typing_Text")
def literal(value, type_=None):
r"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`_expression.ClauseElement` objects (such as strings, ints, dates,
etc.) are
used in a comparison operation with a :class:`_expression.ColumnElement`
subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return coercions.expect(roles.LiteralValueRole, value, type_=type_)
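# Illustrative usage (added example, names assumed):
#     from sqlalchemy import column, literal
#     expr = column('greeting') == literal('hello')
#     # compiles to "greeting = :param_1", with 'hello' sent as a bound value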
def literal_column(text, type_=None):
r"""Produce a :class:`.ColumnClause` object that has the
:paramref:`_expression.column.is_literal` flag set to True.
:func:`_expression.literal_column` is similar to
:func:`_expression.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`_expression.column`
stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`_expression.literal_column` can be that,
or any other arbitrary column-oriented
expression.
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as ``None`` the type will be :class:`.NullType`.
.. seealso::
:func:`_expression.column`
:func:`_expression.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
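# Illustrative usage (added example): the text renders verbatim, unquoted:
#     from sqlalchemy import select, literal_column
#     stmt = select(literal_column("x + 1").label("y"))
#     # str(stmt) -> 'SELECT x + 1 AS y'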
class CompilerElement(Visitable):
"""base class for SQL elements that can be compiled to produce a
SQL string.
.. versionadded:: 2.0
"""
__slots__ = ()
__visit_name__ = "compiler_element"
supports_execution = False
stringify_dialect = "default"
@util.preload_module("sqlalchemy.engine.default")
@util.preload_module("sqlalchemy.engine.url")
def compile(
self,
bind: Optional[Union[Engine, Connection]] = None,
dialect: Optional[Dialect] = None,
**kw: Any,
) -> Compiled:
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An :class:`.Connection` or :class:`.Engine` which
can provide a :class:`.Dialect` in order to generate a
:class:`.Compiled` object. If the ``bind`` and
``dialect`` parameters are both omitted, a default SQL compiler
is used.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A :class:`.Dialect` instance which can generate
a :class:`.Compiled` object. This argument takes precedence over
the ``bind`` argument.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select(t).where(t.c.x == 5)
print(s.compile(compile_kwargs={"literal_binds": True}))
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
else:
if self.stringify_dialect == "default":
default = util.preloaded.engine_default
dialect = default.StrCompileDialect()
else:
url = util.preloaded.engine_url
dialect = url.URL.create(
self.stringify_dialect
).get_dialect()()
return self._compiler(dialect, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
return str(self.compile())
SelfClauseElement = TypeVar("SelfClauseElement", bound="ClauseElement")
@inspection._self_inspects
class ClauseElement(
SupportsWrappingAnnotations,
MemoizedHasCacheKey,
HasCopyInternals,
CompilerElement,
):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = "clause"
_propagate_attrs = util.immutabledict()
"""like annotations, however these propagate outwards liberally
as SQL constructs are built, and are set up at construction time.
"""
_from_objects = []
bind = None
description = None
_is_clone_of = None
is_clause_element = True
is_selectable = False
_is_table = False
_is_textual = False
_is_from_clause = False
_is_returns_rows = False
_is_text_clause = False
_is_from_container = False
_is_select_container = False
_is_select_statement = False
_is_bind_parameter = False
_is_clause_list = False
_is_lambda_element = False
_is_singleton_constant = False
_is_immutable = False
_order_by_label_element = None
_cache_key_traversal = None
def _set_propagate_attrs(self, values):
# usually, self._propagate_attrs is empty here. one case where it's
# not is a subquery against ORM select, that is then pulled as a
# property of an aliased class. should all be good
# assert not self._propagate_attrs
self._propagate_attrs = util.immutabledict(values)
return self
def _clone(self: SelfClauseElement, **kw) -> SelfClauseElement:
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
skip = self._memoized_keys
c = self.__class__.__new__(self.__class__)
c.__dict__ = {k: v for k, v in self.__dict__.items() if k not in skip}
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
def _negate_in_binary(self, negated_op, original_op):
"""a hook to allow the right side of a binary expression to respond
to a negation of the binary expression.
Used for the special case of expanding bind parameter with IN.
"""
return self
def _with_binary_element_type(self, type_):
"""in the context of binary expression, convert the type of this
object to the one given.
applies only to :class:`_expression.ColumnElement` classes.
"""
return self
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@HasMemoized.memoized_attribute
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
# note this creates a cycle, asserted in test_memusage. however,
# turning this into a plain @property adds tens of thousands of method
# calls to Core / ORM performance tests, so the small overhead
# introduced by the relatively small amount of short term cycles
# produced here is preferable
while f is not None:
s.add(f)
f = f._is_clone_of
return s
@property
def entity_namespace(self):
raise AttributeError(
"This SQL expression has no entity namespace "
"with which to filter from."
)
def __getstate__(self):
d = self.__dict__.copy()
d.pop("_is_clone_of", None)
d.pop("_generate_cache_key", None)
return d
def _execute_on_connection(
self, connection, distilled_params, execution_options, _force=False
):
if _force or self.supports_execution:
return connection._execute_clauseelement(
self, distilled_params, execution_options
)
else:
raise exc.ObjectNotExecutableError(self)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`_expression.bindparam` elements
replaced.
Same functionality as :meth:`_expression.ClauseElement.params`,
except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._replace_params(True, optionaldict, kwargs)
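# Illustrative (added example): embed the same construct twice without
# parameter-name collisions:
#     clause = column('x') == bindparam('v')
#     stmt = or_(clause.unique_params(v=1), clause.unique_params(v=2))
#     # renders "x = :v_1 OR x = :v_2"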
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`_expression.bindparam` elements
replaced.
Returns a copy of this ClauseElement with
:func:`_expression.bindparam`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print(clause.compile().params)
{'foo':None}
>>> print(clause.params({'foo':7}).compile().params)
{'foo':7}
"""
return self._replace_params(False, optionaldict, kwargs)
def _replace_params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument"
)
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(
self,
{"maintain_key": True, "detect_subquery_cols": True},
{"bindparam": visit_bindparam},
)
def compare(self, other, **kw):
r"""Compare this :class:`_expression.ClauseElement` to
the given :class:`_expression.ClauseElement`.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass ``compare()`` methods and
may be used to modify the criteria for comparison
(see :class:`_expression.ColumnElement`).
"""
return traversals.compare(self, other, **kw)
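# Illustrative (added example): comparison is structural, not identity-based:
#     column('a').compare(column('a'))   # -> True
#     column('a').compare(column('b'))   # -> False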
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`_expression.ClauseElement`.
This method is overridden by subclasses to return a "grouping"
construct, i.e. parentheses. In particular it's used by "binary"
expressions to provide a grouping around themselves when placed into a
larger expression, as well as by :func:`_expression.select`
constructs when placed into the FROM clause of another
:func:`_expression.select`. (Note that subqueries should be
normally created using the :meth:`_expression.Select.alias` method,
as many
platforms require nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parentheses might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of
:class:`_expression.ClauseElement`
just returns self.
"""
return self
def _ungroup(self):
"""Return this :class:`_expression.ClauseElement`
without any groupings.
"""
return self
def _compile_w_cache(
self,
dialect,
compiled_cache=None,
column_keys=None,
for_executemany=False,
schema_translate_map=None,
**kw,
):
if compiled_cache is not None and dialect._supports_statement_cache:
elem_cache_key = self._generate_cache_key()
else:
elem_cache_key = None
if elem_cache_key:
cache_key, extracted_params = elem_cache_key
key = (
dialect,
cache_key,
tuple(column_keys),
bool(schema_translate_map),
for_executemany,
)
compiled_sql = compiled_cache.get(key)
if compiled_sql is None:
cache_hit = dialect.CACHE_MISS
compiled_sql = self._compiler(
dialect,
cache_key=elem_cache_key,
column_keys=column_keys,
for_executemany=for_executemany,
schema_translate_map=schema_translate_map,
**kw,
)
compiled_cache[key] = compiled_sql
else:
cache_hit = dialect.CACHE_HIT
else:
extracted_params = None
compiled_sql = self._compiler(
dialect,
cache_key=elem_cache_key,
column_keys=column_keys,
for_executemany=for_executemany,
schema_translate_map=schema_translate_map,
**kw,
)
if not dialect._supports_statement_cache:
cache_hit = dialect.NO_DIALECT_SUPPORT
elif compiled_cache is None:
cache_hit = dialect.CACHING_DISABLED
else:
cache_hit = dialect.NO_CACHE_KEY
return compiled_sql, extracted_params, cache_hit
def __invert__(self):
# undocumented element currently used by the ORM for
# relationship.contains()
if hasattr(self, "negation_clause"):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv), operator=operators.inv
)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return "<%s.%s at 0x%x; %s>" % (
self.__module__,
self.__class__.__name__,
id(self),
friendly,
)
class CompilerColumnElement(
roles.DMLColumnRole,
roles.DDLConstraintColumnRole,
roles.ColumnsClauseRole,
CompilerElement,
):
"""A compiler-only column element used for ad-hoc string compilations.
.. versionadded:: 2.0
"""
__slots__ = ()
class SQLCoreOperations(
Generic[_T], ColumnOperators["SQLCoreOperations"], TypingOnly
):
__slots__ = ()
# annotations for comparison methods
# these are from operators->Operators / ColumnOperators,
# redefined with the specific types returned by ColumnElement hierarchies
if typing.TYPE_CHECKING:
def operate(
self, op: OperatorType, *other: Any, **kwargs: Any
) -> ColumnElement:
...
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
) -> ColumnElement:
...
def op(
self,
opstring: Any,
precedence: int = 0,
is_comparison: bool = False,
return_type: Optional[
Union[Type["TypeEngine[_OPT]"], "TypeEngine[_OPT]"]
] = None,
python_impl=None,
) -> Callable[[Any], "BinaryExpression[_OPT]"]:
...
def bool_op(
self, opstring: Any, precedence: int = 0, python_impl=None
) -> Callable[[Any], "BinaryExpression[bool]"]:
...
def __and__(self, other: Any) -> "BooleanClauseList":
...
def __or__(self, other: Any) -> "BooleanClauseList":
...
def __invert__(self) -> "UnaryExpression[_T]":
...
def __lt__(self, other: Any) -> "ColumnElement[bool]":
...
def __le__(self, other: Any) -> "ColumnElement[bool]":
...
def __eq__(self, other: Any) -> "ColumnElement[bool]": # type: ignore[override] # noqa: E501
...
def __ne__(self, other: Any) -> "ColumnElement[bool]": # type: ignore[override] # noqa: E501
...
def is_distinct_from(self, other: Any) -> "ColumnElement[bool]":
...
def is_not_distinct_from(self, other: Any) -> "ColumnElement[bool]":
...
def __gt__(self, other: Any) -> "ColumnElement[bool]":
...
def __ge__(self, other: Any) -> "ColumnElement[bool]":
...
def __neg__(self) -> "UnaryExpression[_T]":
...
def __contains__(self, other: Any) -> "ColumnElement[bool]":
...
def __getitem__(self, index: Any) -> "ColumnElement":
...
@overload
def concat(
self: "SQLCoreOperations[_ST]", other: Any
) -> "ColumnElement[_ST]":
...
@overload
def concat(self, other: Any) -> "ColumnElement":
...
def concat(self, other: Any) -> "ColumnElement":
...
def like(self, other: Any, escape=None) -> "BinaryExpression[bool]":
...
def ilike(self, other: Any, escape=None) -> "BinaryExpression[bool]":
...
def in_(
self,
other: Union[Sequence[Any], "BindParameter", "Select"],
) -> "BinaryExpression[bool]":
...
def not_in(
self,
other: Union[Sequence[Any], "BindParameter", "Select"],
) -> "BinaryExpression[bool]":
...
def not_like(
self, other: Any, escape=None
) -> "BinaryExpression[bool]":
...
def not_ilike(
self, other: Any, escape=None
) -> "BinaryExpression[bool]":
...
def is_(self, other: Any) -> "BinaryExpression[bool]":
...
def is_not(self, other: Any) -> "BinaryExpression[bool]":
...
def startswith(
self, other: Any, escape=None, autoescape=False
) -> "ColumnElement[bool]":
...
def endswith(
self, other: Any, escape=None, autoescape=False
) -> "ColumnElement[bool]":
...
def contains(self, other: Any, **kw: Any) -> "ColumnElement[bool]":
...
def match(self, other: Any, **kwargs) -> "ColumnElement[bool]":
...
def regexp_match(self, pattern, flags=None) -> "ColumnElement[bool]":
...
def regexp_replace(
self, pattern, replacement, flags=None
) -> "ColumnElement":
...
def desc(self) -> "UnaryExpression[_T]":
...
def asc(self) -> "UnaryExpression[_T]":
...
def nulls_first(self) -> "UnaryExpression[_T]":
...
def nulls_last(self) -> "UnaryExpression[_T]":
...
def collate(self, collation) -> "CollationClause":
...
def between(
self, cleft, cright, symmetric=False
) -> "ColumnElement[bool]":
...
def distinct(self: "SQLCoreOperations[_T]") -> "UnaryExpression[_T]":
...
def any_(self) -> "CollectionAggregate":
...
def all_(self) -> "CollectionAggregate":
...
# numeric overloads. These need more tweaking
# in particular they all need to have a variant for Optional[_T]
# because Optional only applies to the data side, not the expression
# side
@overload
def __add__(
self: "Union[_SQO[_NT], _SQO[Optional[_NT]]]",
other: "Union[_SQO[Optional[_NT]], _SQO[_NT], _NT]",
) -> "ColumnElement[_NT]":
...
@overload
def __add__(
self: "Union[_SQO[_NT], _SQO[Optional[_NT]]]",
other: Any,
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __add__(
self: "Union[_SQO[_ST], _SQO[Optional[_ST]]]",
other: Any,
) -> "ColumnElement[_ST]":
...
def __add__(self, other: Any) -> "ColumnElement":
...
@overload
def __radd__(self, other: Any) -> "ColumnElement[_NUMERIC]":
...
@overload
def __radd__(self, other: Any) -> "ColumnElement":
...
def __radd__(self, other: Any) -> "ColumnElement":
...
@overload
def __sub__(
self: "SQLCoreOperations[_NT]",
other: "Union[SQLCoreOperations[_NT], _NT]",
) -> "ColumnElement[_NT]":
...
@overload
def __sub__(self, other: Any) -> "ColumnElement":
...
def __sub__(self, other: Any) -> "ColumnElement":
...
@overload
def __rsub__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __rsub__(self, other: Any) -> "ColumnElement":
...
def __rsub__(self, other: Any) -> "ColumnElement":
...
@overload
def __mul__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __mul__(self, other: Any) -> "ColumnElement":
...
def __mul__(self, other: Any) -> "ColumnElement":
...
@overload
def __rmul__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __rmul__(self, other: Any) -> "ColumnElement":
...
def __rmul__(self, other: Any) -> "ColumnElement":
...
@overload
def __mod__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __mod__(self, other: Any) -> "ColumnElement":
...
def __mod__(self, other: Any) -> "ColumnElement":
...
@overload
def __rmod__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __rmod__(self, other: Any) -> "ColumnElement":
...
def __rmod__(self, other: Any) -> "ColumnElement":
...
@overload
def __truediv__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __truediv__(self, other: Any) -> "ColumnElement":
...
def __truediv__(self, other: Any) -> "ColumnElement":
...
@overload
def __rtruediv__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __rtruediv__(self, other: Any) -> "ColumnElement":
...
def __rtruediv__(self, other: Any) -> "ColumnElement":
...
@overload
def __floordiv__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __floordiv__(self, other: Any) -> "ColumnElement":
...
def __floordiv__(self, other: Any) -> "ColumnElement":
...
@overload
def __rfloordiv__(
self: "SQLCoreOperations[_NT]", other: Any
) -> "ColumnElement[_NUMERIC]":
...
@overload
def __rfloordiv__(self, other: Any) -> "ColumnElement":
...
def __rfloordiv__(self, other: Any) -> "ColumnElement":
...
_SQO = SQLCoreOperations
class ColumnElement(
roles.ColumnArgumentOrKeyRole,
roles.StatementOptionRole,
roles.WhereHavingRole,
roles.BinaryElementRole,
roles.OrderByRole,
roles.ColumnsClauseRole,
roles.LimitOffsetRole,
roles.DMLColumnRole,
roles.DDLConstraintColumnRole,
roles.DDLExpressionRole,
SQLCoreOperations[_T],
operators.ColumnOperators[SQLCoreOperations],
ClauseElement,
):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`_expression.ColumnElement` is the
:class:`_schema.Column` object, :class:`_expression.ColumnElement`
serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`_expression.ColumnElement`
is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of
:class:`_expression.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`_expression.ColumnElement` object,
or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`_expression.ColumnElement`.
The Python value will ultimately be sent
to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature an accessor called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`_expression.ColumnElement` and sometimes a
:class:`_expression.SelectBase` expression.
It is used within the ORM to
convert from ORM-specific objects like mapped classes and
mapped attributes into Core expression objects.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`_expression.ColumnElement` provides the ability to generate new
:class:`_expression.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`_expression.ColumnElement`
instances
which are composed from other, more fundamental
:class:`_expression.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`_expression.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
.. seealso::
:class:`_schema.Column`
:func:`_expression.column`
"""
__visit_name__ = "column_element"
primary_key = False
foreign_keys = []
_proxies = ()
_tq_label = None
"""The named label that can be used to target
this column in a result set in a "table qualified" context.
This label is almost always the label used when
rendering <expr> AS <label> in a SELECT statement when using
the LABEL_STYLE_TABLENAME_PLUS_COL label style, which is what the legacy
ORM ``Query`` object uses as well.
For a regular Column bound to a Table, this is typically the label
<tablename>_<columnname>. For other constructs, different rules
may apply, such as anonymized labels and others.
.. versionchanged:: 1.4.21 renamed from ``._label``
"""
key = None
"""The 'key' that in some circumstances refers to this object in a
Python namespace.
This typically refers to the "key" of the column as present in the
``.c`` collection of a selectable, e.g. ``sometable.c["somekey"]`` would
return a :class:`_schema.Column` with a ``.key`` of "somekey".
"""
@HasMemoized.memoized_attribute
def _tq_key_label(self):
"""A label-based version of 'key' that in some circumstances refers
to this object in a Python namespace.
_tq_key_label comes into play when a select() statement is constructed
with apply_labels(); in this case, all Column objects in the ``.c``
collection are rendered as <tablename>_<columnname> in SQL; this is
essentially the value of ._label. But to locate those columns in the
``.c`` collection, the name is along the lines of <tablename>_<key>;
that's the typical value of .key_label.
.. versionchanged:: 1.4.21 renamed from ``._key_label``
"""
return self._proxy_key
@property
def _key_label(self):
"""legacy; renamed to _tq_key_label"""
return self._tq_key_label
@property
def _label(self):
"""legacy; renamed to _tq_label"""
return self._tq_label
@property
def _non_anon_label(self):
"""the 'name' that naturally applies this element when rendered in
SQL.
Concretely, this is the "name" of a column or a label in a
SELECT statement; ``<columnname>`` and ``<labelname>`` below::
SELECT <columnname> FROM table
SELECT column AS <labelname> FROM table
Above, the two names noted will be what's present in the DBAPI
``cursor.description`` as the names.
If this attribute returns ``None``, it means that the SQL element as
written does not have a 100% fully predictable "name" that would appear
in the ``cursor.description``. Examples include SQL functions, CAST
functions, etc. While such things do return names in
``cursor.description``, they are only predictable on a
database-specific basis; e.g. an expression like ``MAX(table.col)`` may
appear as the string ``max`` on one database (like PostgreSQL) or may
appear as the whole expression ``max(table.col)`` on SQLite.
The default implementation looks for a ``.name`` attribute on the
object, as has been the precedent established in SQLAlchemy for many
years. An exception is made on the ``FunctionElement`` subclass
so that the return value is always ``None``.
.. versionadded:: 1.4.21
"""
return getattr(self, "name", None)
_render_label_in_columns_clause = True
"""A flag used by select._columns_plus_names that helps to determine
we are actually going to render in terms of "SELECT <col> AS <label>".
This flag can be returned as False for some Column objects that want
to be rendered as simple "SELECT <col>"; typically columns that don't have
any parent table and are named the same as what the label would be
in any case.
"""
_allow_label_resolve = True
"""A flag that can be flipped to prevent a column from being resolvable
by string label name.
The joined eager loader strategy in the ORM uses this, for example.
"""
_is_implicitly_boolean = False
_alt_names = ()
def self_group(self, against=None):
if (
against in (operators.and_, operators.or_, operators._asbool)
and self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity
):
return AsBoolean(self, operators.is_true, operators.is_false)
elif against in (operators.any_op, operators.all_op):
return Grouping(self)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.is_false, operators.is_true)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self) -> "TypeEngine[_T]":
return type_api.NULLTYPE
@HasMemoized.memoized_attribute
def comparator(self) -> "TypeEngine.Comparator[_T]":
try:
comparator_factory = self.type.comparator_factory
except AttributeError as err:
raise TypeError(
"Object %r associated with '.type' attribute "
"is not a TypeEngine class or object" % self.type
) from err
else:
return comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError as err:
raise AttributeError(
"Neither %r object nor %r object has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
key,
)
) from err
def operate(
self,
op: operators.OperatorType,
*other: Any,
**kwargs,
) -> "ColumnElement":
return op(self.comparator, *other, **kwargs)
def reverse_operate(
self, op: operators.OperatorType, other: Any, **kwargs
) -> "ColumnElement":
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj, type_=None, expanding=False):
return BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
expanding=expanding,
)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self,)
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set if not c._proxies)
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
for c in self._proxies:
s.update(c.proxy_set)
return s
def _uncached_proxy_set(self):
"""An 'uncached' version of proxy set.
This is so that we can read annotations from the list of columns
without breaking the caching of the above proxy_set.
"""
s = util.column_set([self])
for c in self._proxies:
s.update(c._uncached_proxy_set())
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`_expression.ColumnElement`
has a common ancestor to this :class:`_expression.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
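# Illustrative (added example; t is an assumed Table): a column proxied
# through a subquery shares lineage with its originating column:
#     subq = select(t.c.x).subquery()
#     subq.c.x.shares_lineage(t.c.x)   # -> True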
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return (
hasattr(other, "name")
and hasattr(self, "name")
and other.name == self.name
)
@HasMemoized.memoized_attribute
def _proxy_key(self):
if self._annotations and "proxy_key" in self._annotations:
return self._annotations["proxy_key"]
name = self.key
if not name:
# there's a bit of a seeming contradiction which is that the
# "_non_anon_label" of a column can in fact be an
# "_anonymous_label"; this is when it's on a column that is
# proxying for an anonymous expression in a subquery.
name = self._non_anon_label
if isinstance(name, _anonymous_label):
return None
else:
return name
@HasMemoized.memoized_attribute
def _expression_label(self):
"""a suggested label to use in the case that the column has no name,
which should be used if possible as the explicit 'AS <label>'
where this expression would normally have an anon label.
this is essentially mostly what _proxy_key does except it returns
None if the column has a normal name that can be used.
"""
if getattr(self, "name", None) is not None:
return None
elif self._annotations and "proxy_key" in self._annotations:
return self._annotations["proxy_key"]
else:
return None
def _make_proxy(
self,
selectable,
name: Optional[str] = None,
key=None,
name_is_truncatable=False,
**kw,
):
"""Create a new :class:`_expression.ColumnElement` representing this
:class:`_expression.ColumnElement` as it appears in the select list of
a descending selectable.
"""
if name is None:
name = self._anon_name_label
if key is None:
key = self._proxy_key
else:
key = name
co = ColumnClause(
coercions.expect(roles.TruncatedLabelRole, name)
if name_is_truncatable
else name,
type_=getattr(self, "type", None),
_selectable=selectable,
)
co._propagate_attrs = selectable._propagate_attrs
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = selectable._is_clone_of.columns.get(key)
return key, co
def cast(self, type_):
"""Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
This is a shortcut to the :func:`_expression.cast` function.
.. seealso::
:ref:`coretutorial_casts`
:func:`_expression.cast`
:func:`_expression.type_coerce`
.. versionadded:: 1.0.7
"""
return Cast(self, type_)
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`_expression.label` function.
If 'name' is ``None``, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
def _anon_label(self, seed, add_hash=None) -> "_anonymous_label":
while self._is_clone_of is not None:
self = self._is_clone_of
# as of 1.4 anonymous label for ColumnElement uses hash(), not id(),
# as the identifier, because a column and its annotated version are
# the same thing in a SQL statement
hash_value = hash(self)
if add_hash:
# this path is used for disambiguating anon labels that would
# otherwise be the same name for the same element repeated.
# an additional numeric value is factored in for each label.
# shift hash(self) (which is id(self), typically 8 byte integer)
# 16 bits leftward. fill extra add_hash on right
assert add_hash < (2 << 15)
assert seed
hash_value = (hash_value << 16) | add_hash
# extra underscore is added for labels with extra hash
# values, to isolate the "deduped anon" namespace from the
# regular namespace. eliminates chance of these
# manufactured hash values overlapping with regular ones on some
# Python interpreters
seed = seed + "_"
if isinstance(seed, _anonymous_label):
return _anonymous_label.safe_construct(
hash_value, "", enclosing_label=seed
)
return _anonymous_label.safe_construct(hash_value, seed or "anon")
@util.memoized_property
def _anon_name_label(self) -> "_anonymous_label":
"""Provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time ``anon_label`` is called so
that expressions can reference ``anon_label`` multiple times,
producing the same label name at compile time.
The compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
.. versionchanged:: 1.4.9 - this attribute was not intended to be
public and is renamed to _anon_name_label. anon_label exists
for backwards compat
"""
name = getattr(self, "name", None)
return self._anon_label(name)
@util.memoized_property
def _anon_key_label(self):
"""Provides a constant 'anonymous key label' for this ColumnElement.
Compare to ``anon_label``, except that the "key" of the column,
if available, is used to generate the label.
This is used when a deduplicating key is placed into the columns
collection of a selectable.
.. versionchanged:: 1.4.9 - this attribute was not intended to be
public and is renamed to _anon_key_label. anon_key_label exists
for backwards compat
"""
return self._anon_label(self._proxy_key)
@property
@util.deprecated(
"1.4",
"The :attr:`_expression.ColumnElement.anon_label` attribute is now "
"private, and the public accessor is deprecated.",
)
def anon_label(self):
return self._anon_name_label
@property
@util.deprecated(
"1.4",
"The :attr:`_expression.ColumnElement.anon_key_label` attribute is "
"now private, and the public accessor is deprecated.",
)
def anon_key_label(self):
return self._anon_key_label
def _dedupe_anon_label_idx(self, idx):
"""label to apply to a column that is anon labeled, but repeated
in the SELECT, so that we have to make an "extra anon" label that
disambiguates it from the previous appearance.
these labels come out like "foo_bar_id__1" and have double underscores
in them.
"""
label = getattr(self, "name", None)
# current convention is that if the element doesn't have a
# ".name" (usually because it is not NamedColumn), we try to
# use a "table qualified" form for the "dedupe anon" label,
# based on the notion that a label like
# "CAST(casttest.v1 AS DECIMAL) AS casttest_v1__1" looks better than
# "CAST(casttest.v1 AS DECIMAL) AS anon__1"
if label is None:
return self._dedupe_anon_tq_label_idx(idx)
else:
return self._anon_label(label, add_hash=idx)
@util.memoized_property
def _anon_tq_label(self):
return self._anon_label(getattr(self, "_tq_label", None))
@util.memoized_property
def _anon_tq_key_label(self):
return self._anon_label(getattr(self, "_tq_key_label", None))
def _dedupe_anon_tq_label_idx(self, idx):
label = getattr(self, "_tq_label", None) or "anon"
return self._anon_label(label, add_hash=idx)
class WrapsColumnExpression:
"""Mixin that defines a :class:`_expression.ColumnElement`
as a wrapper with special
labeling behavior for an expression that already has a name.
.. versionadded:: 1.4
.. seealso::
:ref:`change_4449`
"""
@property
def wrapped_column_expression(self):
raise NotImplementedError()
@property
def _tq_label(self):
wce = self.wrapped_column_expression
if hasattr(wce, "_tq_label"):
return wce._tq_label
else:
return None
_label = _tq_label
@property
def _non_anon_label(self):
return None
@property
def _anon_name_label(self):
wce = self.wrapped_column_expression
# this logic tries to get the WrapsColumnExpression to render
# with "<expr> AS <name>", where "<name>" is the natural name
# within the expression itself, e.g. "CAST(table.foo AS VARCHAR) AS foo".
if not wce._is_text_clause:
nal = wce._non_anon_label
if nal:
return nal
elif hasattr(wce, "_anon_name_label"):
return wce._anon_name_label
return super(WrapsColumnExpression, self)._anon_name_label
def _dedupe_anon_label_idx(self, idx):
wce = self.wrapped_column_expression
nal = wce._non_anon_label
if nal:
return self._anon_label(nal + "_")
else:
return self._dedupe_anon_tq_label_idx(idx)
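# Illustrative sketch: constructs deriving from WrapsColumnExpression,
# such as Cast below, re-use the wrapped expression's name as their label
# rather than an anonymous one (the behavior described at :ref:`change_4449`)::
#
#     from sqlalchemy import String, cast, column, select
#
#     stmt = select(cast(column("q"), String))
#     # renders roughly: SELECT CAST(q AS VARCHAR) AS q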
SelfBindParameter = TypeVar("SelfBindParameter", bound="BindParameter")
class BindParameter(roles.InElementRole, ColumnElement[_T]):
r"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select(users_table).\
where(users_table.c.name == bindparam('username'))
A detailed discussion of how :class:`.BindParameter` is used may be
found at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = "bindparam"
_traverse_internals = [
("key", InternalTraversal.dp_anon_name),
("type", InternalTraversal.dp_type),
("callable", InternalTraversal.dp_plain_dict),
("value", InternalTraversal.dp_plain_obj),
]
_is_crud = False
_is_bind_parameter = True
_key_is_anon = False
# bindparam implements its own _gen_cache_key() method however
# we check subclasses for this flag, else no cache key is generated
inherit_cache = True
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
literal_execute=False,
_compared_to_operator=None,
_compared_to_type=None,
_is_crud=False,
):
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label.safe_construct(
id(self),
key
if key is not None and not isinstance(key, _anonymous_label)
else "param",
sanitize_key=True,
)
self._key_is_anon = True
elif key:
self.key = key
else:
self.key = _anonymous_label.safe_construct(id(self), "param")
self._key_is_anon = True
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
# indicate an "expanding" parameter; the compiler sets this
# automatically in the compiler _render_in_expr_w_bindparam method
# for an IN expression
self.expanding = expanding
# this is another hint to help w/ expanding and is typically
# set in the compiler _render_in_expr_w_bindparam method for an
# IN expression
self.expand_op = None
self.literal_execute = literal_execute
if _is_crud:
self._is_crud = True
if type_ is None:
if expanding and value:
check_value = value[0]
else:
check_value = value
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, check_value
)
else:
self.type = type_api._resolve_value_to_type(check_value)
elif isinstance(type_, type):
self.type = type_()
elif type_._is_tuple_type and value:
if expanding:
check_value = value[0]
else:
check_value = value
self.type = type_._resolve_values_to_types(check_value)
else:
self.type = type_
def _with_value(self, value, maintain_key=False, required=NO_ARG):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone(maintain_key=maintain_key)
cloned.value = value
cloned.callable = None
cloned.required = required if required is not NO_ARG else self.required
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._resolve_value_to_type(value)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account whether the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def render_literal_execute(self):
"""Produce a copy of this bound parameter that will enable the
:paramref:`_sql.BindParameter.literal_execute` flag.
The :paramref:`_sql.BindParameter.literal_execute` flag will
have the effect of the parameter rendered in the compiled SQL
string using ``[POSTCOMPILE]`` form, which is a special form that
is converted to be a rendering of the literal value of the parameter
at SQL execution time. The rationale is to support caching
of SQL statement strings that can embed per-statement literal values,
such as LIMIT and OFFSET parameters, in the final SQL string that
is passed to the DBAPI. Dialects in particular may want to use
this method within custom compilation schemes.
.. versionadded:: 1.4.5
.. seealso::
:ref:`engine_thirdparty_caching`
"""
return self.__class__(
self.key,
self.value,
type_=self.type,
literal_execute=True,
)
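# Illustrative sketch (parameter name is hypothetical): the copy returned
# by render_literal_execute() keeps a POSTCOMPILE placeholder in the cached
# SQL string while the value is rendered in-line at execution time::
#
#     from sqlalchemy import bindparam, column, select
#
#     stmt = select(column("x")).where(
#         column("y") == bindparam("val", 5).render_literal_execute()
#     )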
def _negate_in_binary(self, negated_op, original_op):
if self.expand_op is original_op:
bind = self._clone()
bind.expand_op = negated_op
return bind
else:
return self
def _with_binary_element_type(self, type_):
c = ClauseElement._clone(self)
c.type = type_
return c
def _clone(
self: SelfBindParameter, maintain_key=False, **kw
) -> SelfBindParameter:
c = ClauseElement._clone(self, **kw)
if not maintain_key and self.unique:
c.key = _anonymous_label.safe_construct(
id(c), c._orig_key or "param", sanitize_key=True
)
return c
def _gen_cache_key(self, anon_map, bindparams):
_gen_cache_ok = self.__class__.__dict__.get("inherit_cache", False)
if not _gen_cache_ok:
if anon_map is not None:
anon_map[NO_CACHE] = True
return None
id_, found = anon_map.get_anon(self)
if found:
return (id_, self.__class__)
if bindparams is not None:
bindparams.append(self)
return (
id_,
self.__class__,
self.type._static_cache_key,
self.key % anon_map if self._key_is_anon else self.key,
)
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label.safe_construct(
id(self), self._orig_key or "param", sanitize_key=True
)
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d["callable"] = None
d["value"] = v
return d
def __setstate__(self, state):
if state.get("unique", False):
state["key"] = _anonymous_label.safe_construct(
id(self), state.get("_orig_key", "param"), sanitize_key=True
)
self.__dict__.update(state)
def __repr__(self):
return "%s(%r, %r, type_=%r)" % (
self.__class__.__name__,
self.key,
self.value,
self.type,
)
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = "typeclause"
_traverse_internals = [("type", InternalTraversal.dp_type)]
def __init__(self, type_):
self.type = type_
SelfTextClause = typing.TypeVar("SelfTextClause", bound="TextClause")
class TextClause(
roles.DDLConstraintColumnRole,
roles.DDLExpressionRole,
roles.StatementOptionRole,
roles.WhereHavingRole,
roles.OrderByRole,
roles.FromClauseRole,
roles.SelectStatementRole,
roles.BinaryElementRole,
roles.InElementRole,
Executable,
ClauseElement,
):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`_expression.TextClause` construct is produced using the
:func:`_expression.text`
function; see that function for full documentation.
.. seealso::
:func:`_expression.text`
"""
__visit_name__ = "textclause"
_traverse_internals = [
("_bindparams", InternalTraversal.dp_string_clauseelement_dict),
("text", InternalTraversal.dp_string),
]
_is_text_clause = True
_is_textual = True
_bind_params_regex = re.compile(r"(?<![:\w\x5c]):(\w+)(?!:)", re.UNICODE)
_is_implicitly_boolean = False
_render_label_in_columns_clause = False
_hide_froms = ()
def __and__(self, other):
# support use in select.where(), query.filter()
return and_(self, other)
@property
def _select_iterable(self):
return (self,)
# help in those cases where text() is
# interpreted in a column expression situation
key = _label = None
_allow_label_resolve = False
def __init__(self, text):
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ":%s" % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
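# Illustrative sketch: the _bind_params_regex above picks up ":name"
# style parameters but skips doubled colons and backslash-escaped ones,
# so SQL casts and literal colons pass through untouched::
#
#     from sqlalchemy import text
#
#     t = text("SELECT x::integer FROM t WHERE name = :name")
#     # only "name" is picked up as a BindParameter; "::integer" is not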
@_generative
def bindparams(
self: SelfTextClause, *binds, **names_to_values
) -> SelfTextClause:
"""Establish the values and/or types of bound parameters within
this :class:`_expression.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`_expression.TextClause.bindparams`
method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`_expression.TextClause.bindparams`
method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`_expression.TextClause.bindparams`
first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
The :meth:`_expression.TextClause.bindparams`
method also supports the concept of
**unique** bound parameters. These are parameters that are
"uniquified" on name at statement compilation time, so that multiple
:func:`_expression.text`
constructs may be combined together without the names
conflicting. To use this feature, specify the
:paramref:`.BindParameter.unique` flag on each :func:`.bindparam`
object::
stmt1 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name1', unique=True)
)
stmt2 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name2', unique=True)
)
union = union_all(
stmt1.columns(column("id")),
stmt2.columns(column("id"))
)
The above statement will render as::
select id from table where name=:name_1
UNION ALL select id from table where name=:name_2
.. versionadded:: 1.3.11 Added support for the
:paramref:`.BindParameter.unique` flag to work with
:func:`_expression.text`
constructs.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
# the regex used for text() currently will not match
# a unique/anonymous key in any case, so use the _orig_key
# so that a text() construct can support unique parameters
existing = new_params[bind._orig_key]
except KeyError as err:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind._orig_key
) from err
else:
new_params[existing._orig_key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError as err:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key
) from err
else:
new_params[key] = existing._with_value(value, required=False)
return self
@util.preload_module("sqlalchemy.sql.selectable")
def columns(self, *cols, **types):
r"""Turn this :class:`_expression.TextClause` object into a
:class:`_expression.TextualSelect`
object that serves the same role as a SELECT
statement.
The :class:`_expression.TextualSelect` is part of the
:class:`_expression.SelectBase`
hierarchy and can be embedded into another statement by using the
:meth:`_expression.TextualSelect.subquery` method to produce a
:class:`.Subquery`
object, which can then be SELECTed from.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).subquery('st')
stmt = select(mytable).\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we pass a series of :func:`_expression.column` elements to the
:meth:`_expression.TextClause.columns` method positionally. These
:func:`_expression.column`
elements now become first class elements upon the
:attr:`_expression.TextualSelect.selected_columns` column collection,
which then
become part of the :attr:`.Subquery.c` collection after
:meth:`_expression.TextualSelect.subquery` is invoked.
The column expressions we pass to
:meth:`_expression.TextClause.columns` may
also be typed; when we do so, these :class:`.TypeEngine` objects become
the effective return type of the column, so that SQLAlchemy's
result-set-processing systems may be used on the return values.
This is often needed for types such as date or boolean types, as well
as for unicode processing on some dialect configurations::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
As a shortcut to the above syntax, keyword arguments referring to
types alone may be used, if only type conversion is needed::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The positional form of :meth:`_expression.TextClause.columns`
also provides the
unique feature of **positional column targeting**, which is
particularly useful when using the ORM with complex textual queries. If
we specify the columns from our model to
:meth:`_expression.TextClause.columns`,
the result set will match to those columns positionally, meaning the
name or origin of the column in the textual SQL doesn't matter::
stmt = text("SELECT users.id, addresses.id, users.id, "
"users.name, addresses.email_address AS email "
"FROM users JOIN addresses ON users.id=addresses.user_id "
"WHERE users.id = 1").columns(
User.id,
Address.id,
Address.user_id,
User.name,
Address.email_address
)
query = session.query(User).from_statement(stmt).options(
contains_eager(User.addresses))
.. versionadded:: 1.1 the :meth:`_expression.TextClause.columns`
method now
offers positional column targeting in the result set when
the column expressions are passed purely positionally.
The :meth:`_expression.TextClause.columns` method provides a direct
route to calling :meth:`_expression.FromClause.subquery` as well as
:meth:`_expression.SelectBase.cte`
against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select(sometable).where(sometable.c.id == stmt.c.id)
:param \*cols: A series of :class:`_expression.ColumnElement` objects,
typically
:class:`_schema.Column` objects from a :class:`_schema.Table`
or ORM level
column-mapped attributes, representing a set of columns that this
textual string will SELECT from.
:param \**types: A mapping of string names to :class:`.TypeEngine`
type objects indicating the datatypes to use for names that are
SELECTed from the textual string. Prefer to use the ``*cols``
argument as it also indicates positional ordering.
"""
selectable = util.preloaded.sql_selectable
positional_input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
]
keyed_input_cols = [
ColumnClause(key, type_) for key, type_ in types.items()
]
return selectable.TextualSelect(
self,
positional_input_cols + keyed_input_cols,
positional=bool(positional_input_cols) and not keyed_input_cols,
)
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
class Null(SingletonConstant, roles.ConstExprRole, ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = "null"
_traverse_internals = []
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _instance(cls):
"""Return a constant :class:`.Null` construct."""
return Null()
Null._create_singleton()
class False_(SingletonConstant, roles.ConstExprRole, ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = "false"
_traverse_internals = []
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return True_()
@classmethod
def _instance(cls):
return False_()
False_._create_singleton()
class True_(SingletonConstant, roles.ConstExprRole, ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = "true"
_traverse_internals = []
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return False_()
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._instance()
else:
return other
@classmethod
def _instance(cls):
return True_()
True_._create_singleton()
class ClauseList(
roles.InElementRole,
roles.OrderByRole,
roles.ColumnsClauseRole,
roles.DMLColumnRole,
ClauseElement,
):
"""Describe a list of clauses, separated by an operator.
By default, is comma-separated, such as a column listing.
"""
__visit_name__ = "clauselist"
_is_clause_list = True
_traverse_internals = [
("clauses", InternalTraversal.dp_clauseelement_list),
("operator", InternalTraversal.dp_operator),
]
def __init__(
self,
*clauses,
operator=operators.comma_op,
group=True,
group_contents=True,
_flatten_sub_clauses=False,
_literal_as_text_role: Type[roles.SQLRole] = roles.WhereHavingRole,
):
self.operator = operator
self.group = group
self.group_contents = group_contents
if _flatten_sub_clauses:
clauses = util.flatten_iterator(clauses)
self._text_converter_role: Type[roles.SQLRole] = _literal_as_text_role
text_converter_role: Type[roles.SQLRole] = _literal_as_text_role
if self.group_contents:
self.clauses = [
coercions.expect(
text_converter_role, clause, apply_propagate_attrs=self
).self_group(against=self.operator)
for clause in clauses
]
else:
self.clauses = [
coercions.expect(
text_converter_role, clause, apply_propagate_attrs=self
)
for clause in clauses
]
self._is_implicitly_boolean = operators.is_boolean(self.operator)
@classmethod
def _construct_raw(cls, operator, clauses=None):
self = cls.__new__(cls)
self.clauses = clauses if clauses else []
self.group = True
self.operator = operator
self.group_contents = True
self._is_implicitly_boolean = False
return self
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return itertools.chain.from_iterable(
[elem._select_iterable for elem in self.clauses]
)
def append(self, clause):
if self.group_contents:
self.clauses.append(
coercions.expect(self._text_converter_role, clause).self_group(
against=self.operator
)
)
else:
self.clauses.append(
coercions.expect(self._text_converter_role, clause)
)
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
class BooleanClauseList(ClauseList, ColumnElement[bool]):
__visit_name__ = "clauselist"
inherit_cache = True
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor"
)
@classmethod
def _process_clauses_for_boolean(
cls, operator, continue_on, skip_on, clauses
):
has_continue_on = None
convert_clauses = []
against = operators._asbool
lcc = 0
for clause in clauses:
if clause is continue_on:
# instance of continue_on, like and_(x, y, True, z), store it
# if we didn't find one already, we will use it if there
# are no other expressions here.
has_continue_on = clause
elif clause is skip_on:
# instance of skip_on, e.g. and_(x, y, False, z), cancels
# the rest out
convert_clauses = [clause]
lcc = 1
break
else:
if not lcc:
lcc = 1
else:
against = operator
# technically this would be len(convert_clauses) + 1
# however this only needs to indicate "greater than one"
lcc = 2
convert_clauses.append(clause)
if not convert_clauses and has_continue_on is not None:
convert_clauses = [has_continue_on]
lcc = 1
return lcc, [c.self_group(against=against) for c in convert_clauses]
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
lcc, convert_clauses = cls._process_clauses_for_boolean(
operator,
continue_on,
skip_on,
[
coercions.expect(roles.WhereHavingRole, clause)
for clause in util.coerce_generator_arg(clauses)
],
)
if lcc > 1:
# multiple elements. Return regular BooleanClauseList
# which will link elements against the operator.
return cls._construct_raw(operator, convert_clauses)
elif lcc == 1:
# just one element. return it as a single boolean element,
# not a list and discard the operator.
return convert_clauses[0]
else:
# no elements period. deprecated use case. return an empty
# ClauseList construct that generates nothing unless it has
# elements added to it.
util.warn_deprecated(
"Invoking %(name)s() without arguments is deprecated, and "
"will be disallowed in a future release. For an empty "
"%(name)s() construct, use %(name)s(%(continue_on)s, *args)."
% {
"name": operator.__name__,
"continue_on": "True"
if continue_on is True_._singleton
else "False",
},
version="1.4",
)
return cls._construct_raw(operator)
@classmethod
def _construct_for_whereclause(cls, clauses):
operator, continue_on, skip_on = (
operators.and_,
True_._singleton,
False_._singleton,
)
lcc, convert_clauses = cls._process_clauses_for_boolean(
operator,
continue_on,
skip_on,
clauses, # these are assumed to be coerced already
)
if lcc > 1:
# multiple elements. Return regular BooleanClauseList
# which will link elements against the operator.
return cls._construct_raw(operator, convert_clauses)
elif lcc == 1:
# just one element. return it as a single boolean element,
# not a list and discard the operator.
return convert_clauses[0]
else:
return None
@classmethod
def _construct_raw(cls, operator, clauses=None):
self = cls.__new__(cls)
self.clauses = clauses if clauses else []
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
self._is_implicitly_boolean = True
return self
@classmethod
def and_(cls, *clauses):
r"""Produce a conjunction of expressions joined by ``AND``.
See :func:`_sql.and_` for full documentation.
"""
return cls._construct(
operators.and_, True_._singleton, False_._singleton, *clauses
)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
See :func:`_sql.or_` for full documentation.
"""
return cls._construct(
operators.or_, False_._singleton, True_._singleton, *clauses
)
@property
def _select_iterable(self):
return (self,)
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
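# Illustrative sketch (ad hoc column name): the continue_on / skip_on logic
# above means the boolean constants prune or short-circuit a conjunction::
#
#     from sqlalchemy import and_, column, false, true
#
#     a = column("a")
#     print(and_(a > 1, true()))   # -> a > :a_1  (true() is pruned)
#     print(and_(a > 1, false()))  # -> false     (whole expression collapses)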
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
__visit_name__ = "tuple"
_traverse_internals = ClauseList._traverse_internals + []
@util.preload_module("sqlalchemy.sql.sqltypes")
def __init__(self, *clauses, types=None):
sqltypes = util.preloaded.sql_sqltypes
if types is None:
clauses = [
coercions.expect(roles.ExpressionElementRole, c)
for c in clauses
]
else:
if len(types) != len(clauses):
raise exc.ArgumentError(
"Wrong number of elements for %d-tuple: %r "
% (len(types), clauses)
)
clauses = [
coercions.expect(
roles.ExpressionElementRole,
c,
type_=typ if not typ._isnull else None,
)
for typ, c in zip(types, clauses)
]
self.type = sqltypes.TupleType(*[arg.type for arg in clauses])
super(Tuple, self).__init__(*clauses)
@property
def _select_iterable(self):
return (self,)
def _bind_param(self, operator, obj, type_=None, expanding=False):
if expanding:
return BindParameter(
None,
value=obj,
_compared_to_operator=operator,
unique=True,
expanding=True,
type_=self.type,
)
else:
return Tuple(
*[
BindParameter(
None,
o,
_compared_to_operator=operator,
_compared_to_type=compared_to_type,
unique=True,
type_=type_,
)
for o, compared_to_type in zip(obj, self.type.types)
]
)
def self_group(self, against=None):
# Tuple is parenthesized by definition.
return self
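# Illustrative sketch: Tuple is normally produced via the public tuple_()
# constructor; with an expanding bound parameter it supports composite IN
# comparisons on backends that allow them::
#
#     from sqlalchemy import column, tuple_
#
#     expr = tuple_(column("a"), column("b")).in_([(1, 2), (3, 4)])
#     # renders roughly: (a, b) IN (__[POSTCOMPILE_param_1])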
class Case(ColumnElement[_T]):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select(users_table).\
where(
case(
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J'),
else_='E'
)
)
Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = "case"
_traverse_internals = [
("value", InternalTraversal.dp_clauseelement),
("whens", InternalTraversal.dp_clauseelement_tuples),
("else_", InternalTraversal.dp_clauseelement),
]
# for case(), the type is derived from the whens. so for the moment
# users would have to cast() the case to get a specific type
def __init__(self, *whens, value=None, else_=None):
whens = coercions._expression_collection_was_a_list(
"whens", "case", whens
)
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
whenlist = [
(
coercions.expect(
roles.ExpressionElementRole,
c,
apply_propagate_attrs=self,
).self_group(),
coercions.expect(roles.ExpressionElementRole, r),
)
for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = coercions.expect(roles.ExpressionElementRole, value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = coercions.expect(roles.ExpressionElementRole, else_)
else:
self.else_ = None
@property
def _from_objects(self):
return list(
itertools.chain(*[x._from_objects for x in self.get_children()])
)
class Cast(WrapsColumnExpression, ColumnElement[_T]):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select(cast(product_table.c.unit_price, Numeric(10, 4)))
Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:ref:`coretutorial_casts`
:func:`.cast`
:func:`.type_coerce` - an alternative to CAST that coerces the type
on the Python side only, which is often sufficient to generate the
correct SQL and data coercion.
"""
__visit_name__ = "cast"
_traverse_internals = [
("clause", InternalTraversal.dp_clauseelement),
("typeclause", InternalTraversal.dp_clauseelement),
]
def __init__(self, expression, type_):
self.type = type_api.to_instance(type_)
self.clause = coercions.expect(
roles.ExpressionElementRole,
expression,
type_=self.type,
apply_propagate_attrs=self,
)
self.typeclause = TypeClause(self.type)
@property
def _from_objects(self):
return self.clause._from_objects
@property
def wrapped_column_expression(self):
return self.clause
class TypeCoerce(WrapsColumnExpression, ColumnElement[_T]):
"""Represent a Python-side type-coercion wrapper.
:class:`.TypeCoerce` supplies the :func:`_expression.type_coerce`
function; see that function for usage details.
.. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
a persistent :class:`.TypeCoerce` wrapper object rather than
translating the given object in place.
.. seealso::
:func:`_expression.type_coerce`
:func:`.cast`
"""
__visit_name__ = "type_coerce"
_traverse_internals = [
("clause", InternalTraversal.dp_clauseelement),
("type", InternalTraversal.dp_type),
]
def __init__(self, expression, type_):
self.type = type_api.to_instance(type_)
self.clause = coercions.expect(
roles.ExpressionElementRole,
expression,
type_=self.type,
apply_propagate_attrs=self,
)
@property
def _from_objects(self):
return self.clause._from_objects
@HasMemoized.memoized_attribute
def typed_expression(self):
if isinstance(self.clause, BindParameter):
bp = self.clause._clone()
bp.type = self.type
return bp
else:
return self.clause
@property
def wrapped_column_expression(self):
return self.clause
def self_group(self, against=None):
grouped = self.clause.self_group(against=against)
if grouped is not self.clause:
return TypeCoerce(grouped, self.type)
else:
return self
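# Illustrative sketch: unlike Cast, TypeCoerce emits no SQL of its own; it
# only applies the given type's Python-side bind/result processing::
#
#     from sqlalchemy import String, cast, column, select, type_coerce
#
#     print(select(cast(column("x"), String)))         # SELECT CAST(x AS VARCHAR) AS x
#     print(select(type_coerce(column("x"), String)))  # SELECT x  (no CAST rendered)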
class Extract(ColumnElement[_T]):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = "extract"
_traverse_internals = [
("expr", InternalTraversal.dp_clauseelement),
("field", InternalTraversal.dp_string),
]
def __init__(self, field, expr):
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = coercions.expect(roles.ExpressionElementRole, expr)
@property
def _from_objects(self):
return self.expr._from_objects
class _label_reference(ColumnElement):
"""Wrap a column expression as it appears in a 'reference' context.
This is any expression that includes an _order_by_label_element,
which is a Label, or a DESC / ASC construct wrapping a Label.
The production of _label_reference() should occur when an expression
is added to this context; this includes the ORDER BY or GROUP BY of a
SELECT statement, as well as a few other places, such as the ORDER BY
within an OVER clause.
"""
__visit_name__ = "label_reference"
_traverse_internals = [("element", InternalTraversal.dp_clauseelement)]
def __init__(self, element):
self.element = element
@property
def _from_objects(self):
return ()
class _textual_label_reference(ColumnElement):
__visit_name__ = "textual_label_reference"
_traverse_internals = [("element", InternalTraversal.dp_string)]
def __init__(self, element):
self.element = element
@util.memoized_property
def _text_clause(self):
return TextClause(self.element)
class UnaryExpression(ColumnElement[_T]):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nulls_first` and :func:`.nulls_last`.
"""
__visit_name__ = "unary"
_traverse_internals = [
("element", InternalTraversal.dp_clauseelement),
("operator", InternalTraversal.dp_operator),
("modifier", InternalTraversal.dp_operator),
]
def __init__(
self,
element,
operator=None,
modifier=None,
type_: Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"] = None,
wraps_column_expression=False,
):
self.operator = operator
self.modifier = modifier
self._propagate_attrs = element._propagate_attrs
self.element = element.self_group(
against=self.operator or self.modifier
)
self.type: TypeEngine[_T] = type_api.to_instance(type_)
self.wraps_column_expression = wraps_column_expression
@classmethod
def _create_nulls_first(cls, column):
return UnaryExpression(
coercions.expect(roles.ByOfRole, column),
modifier=operators.nulls_first_op,
wraps_column_expression=False,
)
@classmethod
def _create_nulls_last(cls, column):
return UnaryExpression(
coercions.expect(roles.ByOfRole, column),
modifier=operators.nulls_last_op,
wraps_column_expression=False,
)
@classmethod
def _create_desc(cls, column):
return UnaryExpression(
coercions.expect(roles.ByOfRole, column),
modifier=operators.desc_op,
wraps_column_expression=False,
)
@classmethod
def _create_asc(cls, column):
return UnaryExpression(
coercions.expect(roles.ByOfRole, column),
modifier=operators.asc_op,
wraps_column_expression=False,
)
@classmethod
def _create_distinct(cls, expr):
expr = coercions.expect(roles.ExpressionElementRole, expr)
return UnaryExpression(
expr,
operator=operators.distinct_op,
type_=expr.type,
wraps_column_expression=False,
)
@property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
type_=type_api.BOOLEANTYPE,
wraps_column_expression=self.wraps_column_expression,
)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
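# Illustrative sketch: the factory classmethods above back the public
# desc(), asc(), nulls_first(), nulls_last() and distinct() functions::
#
#     from sqlalchemy import column, desc, distinct, nulls_last
#
#     print(desc(column("x")))              # x DESC
#     print(nulls_last(desc(column("x"))))  # x DESC NULLS LAST
#     print(distinct(column("x")))          # DISTINCT x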
class CollectionAggregate(UnaryExpression):
"""Forms the basis for right-hand collection operator modifiers
ANY and ALL.
The ANY and ALL keywords are available in different ways on different
backends. On PostgreSQL, they only work for an ARRAY type. On
MySQL, they only work for subqueries.
"""
inherit_cache = True
@classmethod
def _create_any(cls, expr):
expr = coercions.expect(roles.ExpressionElementRole, expr)
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.any_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
@classmethod
def _create_all(cls, expr):
expr = coercions.expect(roles.ExpressionElementRole, expr)
expr = expr.self_group()
return CollectionAggregate(
expr,
operator=operators.all_op,
type_=type_api.NULLTYPE,
wraps_column_expression=False,
)
# operate and reverse_operate are hardwired to
# dispatch onto the type comparator directly, so that we can
# ensure "reversed" behavior.
def operate(self, op, *other, **kwargs):
if not operators.is_comparison(op):
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
kwargs["reverse"] = kwargs["_any_all_expr"] = True
return self.comparator.operate(operators.mirror(op), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
# comparison operators should never call reverse_operate
assert not operators.is_comparison(op)
raise exc.ArgumentError(
"Only comparison operators may be used with ANY/ALL"
)
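# Illustrative sketch (ad hoc column): because operate() mirrors the
# operator, a comparison written with ANY/ALL on the left comes out with
# the scalar on the left in SQL, as the syntax requires::
#
#     from sqlalchemy import Integer, any_, column
#     from sqlalchemy.dialects import postgresql
#
#     arr = column("arr", postgresql.ARRAY(Integer))
#     print(any_(arr) == 5)   # renders roughly: 5 = ANY (arr)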
class AsBoolean(WrapsColumnExpression, UnaryExpression):
inherit_cache = True
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
self.wraps_column_expression = True
self._is_implicitly_boolean = element._is_implicitly_boolean
@property
def wrapped_column_expression(self):
return self.element
def self_group(self, against=None):
return self
def _negate(self):
if isinstance(self.element, (True_, False_)):
return self.element._negate()
else:
return AsBoolean(self.element, self.negate, self.operator)
class BinaryExpression(ColumnElement[_T]):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
a + b
"""
__visit_name__ = "binary"
_traverse_internals = [
("left", InternalTraversal.dp_clauseelement),
("right", InternalTraversal.dp_clauseelement),
("operator", InternalTraversal.dp_operator),
("negate", InternalTraversal.dp_operator),
("modifiers", InternalTraversal.dp_plain_dict),
(
"type",
InternalTraversal.dp_type,
), # affects JSON CAST operators
]
_is_implicitly_boolean = True
"""Indicates that any database will know this is a boolean expression
even if the database does not have an explicit boolean datatype.
"""
def __init__(
self,
left: ColumnElement,
right: Union[ColumnElement, ClauseList],
operator,
type_: Optional[
Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"]
] = None,
negate=None,
modifiers=None,
):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, str):
operator = operators.custom_op(operator)
self._orig = (left.__hash__(), right.__hash__())
self._propagate_attrs = left._propagate_attrs or right._propagate_attrs
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type: TypeEngine[_T] = type_api.to_instance(type_)
self.negate = negate
self._is_implicitly_boolean = operators.is_boolean(operator)
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(*self._orig)
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
if typing.TYPE_CHECKING:
def __invert__(
self: "BinaryExpression[_T]",
) -> "BinaryExpression[_T]":
...
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right._negate_in_binary(self.negate, self.operator),
self.negate,
negate=self.operator,
type_=self.type,
modifiers=self.modifiers,
)
else:
return super(BinaryExpression, self)._negate()
class Slice(ColumnElement):
"""Represent SQL for a Python array-slice object.
This is not a specific SQL construct at this level, but
may be interpreted by specific dialects, e.g. PostgreSQL.
"""
__visit_name__ = "slice"
_traverse_internals = [
("start", InternalTraversal.dp_clauseelement),
("stop", InternalTraversal.dp_clauseelement),
("step", InternalTraversal.dp_clauseelement),
]
def __init__(self, start, stop, step, _name=None):
self.start = coercions.expect(
roles.ExpressionElementRole,
start,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.stop = coercions.expect(
roles.ExpressionElementRole,
stop,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.step = coercions.expect(
roles.ExpressionElementRole,
step,
name=_name,
type_=type_api.INTEGERTYPE,
)
self.type = type_api.NULLTYPE
def self_group(self, against=None):
assert against is operator.getitem
return self
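# Illustrative sketch: a Slice is produced from Python slice syntax against
# an array-typed column; PostgreSQL interprets it as an array subscript
# range::
#
#     from sqlalchemy import Integer, column
#     from sqlalchemy.dialects import postgresql
#
#     arr = column("arr", postgresql.ARRAY(Integer))
#     expr = arr[2:5]   # renders on PostgreSQL roughly as: arr[2:5]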
class IndexExpression(BinaryExpression):
"""Represent the class of expressions that are like an "index"
operation."""
inherit_cache = True
class GroupedElement(ClauseElement):
"""Represent any parenthesized expression"""
__visit_name__ = "grouping"
def self_group(self, against=None):
return self
def _ungroup(self):
return self.element._ungroup()
class Grouping(GroupedElement, ColumnElement):
"""Represent a grouping within a column expression"""
_traverse_internals = [
("element", InternalTraversal.dp_clauseelement),
("type", InternalTraversal.dp_type),
]
def __init__(self, element):
self.element = element
self.type = getattr(element, "type", type_api.NULLTYPE)
def _with_binary_element_type(self, type_):
return self.__class__(self.element._with_binary_element_type(type_))
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@property
def _tq_label(self):
return (
getattr(self.element, "_tq_label", None) or self._anon_name_label
)
@property
def _proxies(self):
if isinstance(self.element, ColumnElement):
return [self.element]
else:
return []
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {"element": self.element, "type": self.type}
def __setstate__(self, state):
self.element = state["element"]
self.type = state["type"]
RANGE_UNBOUNDED = util.symbol("RANGE_UNBOUNDED")
RANGE_CURRENT = util.symbol("RANGE_CURRENT")
class Over(ColumnElement[_T]):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. Most modern SQL backends now support window functions.
"""
__visit_name__ = "over"
_traverse_internals = [
("element", InternalTraversal.dp_clauseelement),
("order_by", InternalTraversal.dp_clauseelement),
("partition_by", InternalTraversal.dp_clauseelement),
("range_", InternalTraversal.dp_plain_obj),
("rows", InternalTraversal.dp_plain_obj),
]
order_by = None
partition_by = None
element = None
"""The underlying expression object to which this :class:`.Over`
object refers towards."""
def __init__(
self, element, partition_by=None, order_by=None, range_=None, rows=None
):
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by), _literal_as_text_role=roles.ByOfRole
)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text_role=roles.ByOfRole,
)
if range_:
self.range_ = self._interpret_range(range_)
if rows:
raise exc.ArgumentError(
"'range_' and 'rows' are mutually exclusive"
)
else:
self.rows = None
elif rows:
self.rows = self._interpret_range(rows)
self.range_ = None
else:
self.rows = self.range_ = None
def __reduce__(self):
return self.__class__, (
self.element,
self.partition_by,
self.order_by,
self.range_,
self.rows,
)
def _interpret_range(self, range_):
if not isinstance(range_, tuple) or len(range_) != 2:
raise exc.ArgumentError("2-tuple expected for range/rows")
if range_[0] is None:
lower = RANGE_UNBOUNDED
else:
try:
lower = int(range_[0])
except ValueError as err:
raise exc.ArgumentError(
"Integer or None expected for range value"
) from err
else:
if lower == 0:
lower = RANGE_CURRENT
if range_[1] is None:
upper = RANGE_UNBOUNDED
else:
try:
upper = int(range_[1])
except ValueError as err:
raise exc.ArgumentError(
"Integer or None expected for range value"
) from err
else:
if upper == 0:
upper = RANGE_CURRENT
return lower, upper
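# Illustrative sketch: the 2-tuple convention above maps None to UNBOUNDED
# and 0 to CURRENT ROW::
#
#     from sqlalchemy import column, func
#
#     w = func.row_number().over(order_by=column("x"), rows=(None, 0))
#     # renders roughly:
#     # row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING
#     #                    AND CURRENT ROW)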
@util.memoized_property
def type(self):
return self.element.type
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
)
)
class WithinGroup(ColumnElement[_T]):
"""Represent a WITHIN GROUP (ORDER BY) clause.
This is a special operator against so-called
"ordered set aggregate" and "hypothetical
set aggregate" functions, including ``percentile_cont()``,
``rank()``, ``dense_rank()``, etc.
It's supported only by certain database backends, such as PostgreSQL,
Oracle and MS SQL Server.
The :class:`.WithinGroup` construct extracts its type from the
method :meth:`.FunctionElement.within_group_type`. If this returns
``None``, the function's ``.type`` is used.
"""
__visit_name__ = "withingroup"
_traverse_internals = [
("element", InternalTraversal.dp_clauseelement),
("order_by", InternalTraversal.dp_clauseelement),
]
order_by = None
def __init__(self, element, *order_by):
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by), _literal_as_text_role=roles.ByOfRole
)
def __reduce__(self):
return self.__class__, (self.element,) + tuple(self.order_by)
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this :class:`.WithinGroup`
construct.
This function has the same signature as that of
:meth:`.FunctionElement.over`.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
@util.memoized_property
def type(self):
wgt = self.element.within_group_type(self)
if wgt is not None:
return wgt
else:
return self.element.type
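# Illustrative sketch: WithinGroup is produced via
# FunctionElement.within_group(), typical of ordered-set aggregates::
#
#     from sqlalchemy import column, func
#
#     expr = func.percentile_cont(0.25).within_group(column("x").desc())
#     # renders roughly: percentile_cont(:percentile_cont_1)
#     #                  WITHIN GROUP (ORDER BY x DESC)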
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.order_by)
if c is not None
]
)
)
class FunctionFilter(ColumnElement):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
which controls which rows are passed to it.
It's supported only by certain database backends.
Invocation of :class:`.FunctionFilter` is via
:meth:`.FunctionElement.filter`::
func.count(1).filter(True)
.. versionadded:: 1.0.0
.. seealso::
:meth:`.FunctionElement.filter`
"""
__visit_name__ = "funcfilter"
_traverse_internals = [
("func", InternalTraversal.dp_clauseelement),
("criterion", InternalTraversal.dp_clauseelement),
]
criterion = None
def __init__(self, func, *criterion):
self.func = func
self.filter(*criterion)
def filter(self, *criterion):
"""Produce an additional FILTER against the function.
This method adds additional criteria to the initial criteria
set up by :meth:`.FunctionElement.filter`.
Multiple criteria are joined together at SQL render time
via ``AND``.
"""
for criterion in list(criterion):
criterion = coercions.expect(roles.WhereHavingRole, criterion)
if self.criterion is not None:
self.criterion = self.criterion & criterion
else:
self.criterion = criterion
return self
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
"""Produce an OVER clause against this filtered function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
See :func:`_expression.over` for a full description.
"""
return Over(
self,
partition_by=partition_by,
order_by=order_by,
range_=range_,
rows=rows,
)
def self_group(self, against=None):
if operators.is_precedent(operators.filter_op, against):
return Grouping(self)
else:
return self
@util.memoized_property
def type(self):
return self.func.type
@property
def _from_objects(self):
return list(
itertools.chain(
*[
c._from_objects
for c in (self.func, self.criterion)
if c is not None
]
)
)
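# Illustrative sketch: FunctionFilter is produced via
# FunctionElement.filter(), rendering the FILTER clause where supported::
#
#     from sqlalchemy import column, func
#
#     expr = func.count(column("id")).filter(column("x") > 5)
#     # renders roughly: count(id) FILTER (WHERE x > :x_1)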
class Label(roles.LabeledColumnExprRole, ColumnElement[_T]):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = "label"
_traverse_internals = [
("name", InternalTraversal.dp_anon_name),
("_type", InternalTraversal.dp_type),
("_element", InternalTraversal.dp_clauseelement),
]
def __init__(self, name, element, type_=None):
orig_element = element
element = coercions.expect(
roles.ExpressionElementRole,
element,
apply_propagate_attrs=self,
)
while isinstance(element, Label):
# TODO: this is only covered in test_text.py, but nothing
# fails if it's removed. determine rationale
element = element.element
if name:
self.name = name
else:
self.name = _anonymous_label.safe_construct(
id(self), getattr(element, "name", "anon")
)
if isinstance(orig_element, Label):
# TODO: no coverage for this block, again would be in
# test_text.py where the resolve_label concept is important
self._resolve_label = orig_element._label
self.key = self._tq_label = self._tq_key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _is_implicitly_boolean(self):
return self.element._is_implicitly_boolean
@HasMemoized.memoized_attribute
def _allow_label_resolve(self):
return self.element._allow_label_resolve
@property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, "type", None)
)
@HasMemoized.memoized_attribute
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
return self._apply_to_inner(self._element.self_group, against=against)
def _negate(self):
return self._apply_to_inner(self._element._negate)
def _apply_to_inner(self, fn, *arg, **kw):
sub_element = fn(*arg, **kw)
if sub_element is not self._element:
return Label(self.name, sub_element, type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw):
self._reset_memoizations()
self._element = clone(self._element, **kw)
if anonymize_labels:
self.name = _anonymous_label.safe_construct(
id(self), getattr(self.element, "name", "anon")
)
self.key = self._tq_label = self._tq_key_label = self.name
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
name = self.name if not name else name
key, e = self.element._make_proxy(
selectable,
name=name,
disallow_is_literal=True,
name_is_truncatable=isinstance(name, _truncated_label),
)
# there was a note here to remove this assertion, which was here
# to determine if we later could support a use case where
# the key and name of a label are separate. But I don't know what
# that case was. For now, this is an unexpected case that occurs
# when a label name conflicts with other columns and select()
# is attempting to disambiguate an explicit label, which is not what
# the user would want. See issue #6090.
if key != self.name:
raise exc.InvalidRequestError(
"Label name %s is being renamed to an anonymous label due "
"to disambiguation "
"which is not supported right now. Please use unique names "
"for explicit labels." % (self.name)
)
e._propagate_attrs = selectable._propagate_attrs
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return self.key, e
class NamedColumn(ColumnElement[_T]):
is_literal = False
table = None
def _compare_name_for_result(self, other):
return (hasattr(other, "name") and self.name == other.name) or (
hasattr(other, "_label") and self._label == other._label
)
@util.memoized_property
def description(self):
return self.name
@HasMemoized.memoized_attribute
def _tq_key_label(self):
"""table qualified label based on column key.
for table-bound columns this is <tablename>_<column key/proxy key>;
all other expressions it resolves to key/proxy key.
"""
proxy_key = self._proxy_key
if proxy_key and proxy_key != self.name:
return self._gen_tq_label(proxy_key)
else:
return self._tq_label
@HasMemoized.memoized_attribute
def _tq_label(self):
"""table qualified label based on column name.
for table-bound columns this is <tablename>_<columnname>; all other
expressions it resolves to .name.
"""
return self._gen_tq_label(self.name)
@HasMemoized.memoized_attribute
def _render_label_in_columns_clause(self):
return True
@HasMemoized.memoized_attribute
def _non_anon_label(self):
return self.name
def _gen_tq_label(self, name, dedupe_on_key=True):
return name
def _bind_param(self, operator, obj, type_=None, expanding=False):
return BindParameter(
self.key,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
expanding=expanding,
)
def _make_proxy(
self,
selectable,
name=None,
name_is_truncatable=False,
disallow_is_literal=False,
**kw,
):
c = ColumnClause(
coercions.expect(roles.TruncatedLabelRole, name or self.name)
if name_is_truncatable
else (name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=False,
)
c._propagate_attrs = selectable._propagate_attrs
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
return c.key, c
class ColumnClause(
roles.DDLReferredColumnRole,
roles.LabeledColumnExprRole,
roles.StrAsPlainColumnRole,
Immutable,
NamedColumn[_T],
):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`_schema.Column` class, is typically invoked using the
:func:`_expression.column` function, as in::
from sqlalchemy import column
id, name = column("id"), column("name")
stmt = select(id, name).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`_schema.Column` object. While the :class:`_schema.Column`
class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`_schema.Column` does,
so in that sense is a "lightweight"
version of :class:`_schema.Column`.
Full details on :class:`.ColumnClause` usage is at
:func:`_expression.column`.
.. seealso::
:func:`_expression.column`
:class:`_schema.Column`
"""
table = None
is_literal = False
__visit_name__ = "column"
_traverse_internals = [
("name", InternalTraversal.dp_anon_name),
("type", InternalTraversal.dp_type),
("table", InternalTraversal.dp_clauseelement),
("is_literal", InternalTraversal.dp_boolean),
]
onupdate = default = server_default = server_onupdate = None
_is_multiparam_column = False
def __init__(
self,
text: str,
type_: Optional[
Union[Type["TypeEngine[_T]"], "TypeEngine[_T]"]
] = None,
is_literal: bool = False,
_selectable: Optional["FromClause"] = None,
):
self.key = self.name = text
self.table = _selectable
self.type: TypeEngine[_T] = type_api.to_instance(type_)
self.is_literal = is_literal
def get_children(self, column_tables=False, **kw):
# override base get_children() to not return the Table
# or selectable that is parent to this column. Traversals
# expect the columns of tables and subqueries to be leaf nodes.
return []
@property
def entity_namespace(self):
if self.table is not None:
return self.table.entity_namespace
else:
return super(ColumnClause, self).entity_namespace
def _clone(self, detect_subquery_cols=False, **kw):
if (
detect_subquery_cols
and self.table is not None
and self.table._is_subquery
):
clone = kw.pop("clone")
table = clone(self.table, **kw)
new = table.c.corresponding_column(self)
return new
return super(ColumnClause, self)._clone(**kw)
@HasMemoized.memoized_attribute
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@HasMemoized.memoized_attribute
def _render_label_in_columns_clause(self):
return self.table is not None
@property
def _ddl_label(self):
return self._gen_tq_label(self.name, dedupe_on_key=False)
def _compare_name_for_result(self, other):
if (
self.is_literal
or self.table is None
or self.table._is_textual
or not hasattr(other, "proxy_set")
or (
isinstance(other, ColumnClause)
and (
other.is_literal
or other.table is None
or other.table._is_textual
)
)
):
return (hasattr(other, "name") and self.name == other.name) or (
hasattr(other, "_tq_label")
and self._tq_label == other._tq_label
)
else:
return other.proxy_set.intersection(self.proxy_set)
def _gen_tq_label(self, name, dedupe_on_key=True):
"""generate table-qualified label
for a table-bound column this is <tablename>_<columnname>.
used primarily for LABEL_STYLE_TABLENAME_PLUS_COL
as well as the .columns collection on a Join object.
"""
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, "schema", None):
label = t.schema.replace(".", "_") + "_" + t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
if dedupe_on_key:
# ensure the label name doesn't conflict with that of an
# existing column. note that this implies that any Column
# must **not** set up its _label before its parent table has
# all of its other Column objects set up. There are several
# tables in the test suite which will fail otherwise; example:
# table "owner" has columns "name" and "owner_name". Therefore
# column owner.name cannot use the label "owner_name", it has
# to be "owner_name_1".
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return coercions.expect(roles.TruncatedLabelRole, label)
else:
return name
def _make_proxy(
self,
selectable,
name=None,
name_is_truncatable=False,
disallow_is_literal=False,
**kw,
):
# the "is_literal" flag normally should never be propagated; a proxied
# column is always a SQL identifier and never the actual expression
# being evaluated. however, there is a case where the "is_literal" flag
# might be used to allow the given identifier to have a fixed quoting
# pattern already, so maintain the flag for the proxy unless a
# :class:`.Label` object is creating the proxy. See [ticket:4730].
is_literal = (
not disallow_is_literal
and self.is_literal
and (
# note this does not accommodate for quoted_name differences
# right now
name is None
or name == self.name
)
)
c = self._constructor(
coercions.expect(roles.TruncatedLabelRole, name or self.name)
if name_is_truncatable
else (name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal,
)
c._propagate_attrs = selectable._propagate_attrs
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
return c.key, c
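# Illustrative sketch, not part of the original module: the lightweight
# column()/table() constructs described in the ColumnClause docstring above,
# assuming a standard SQLAlchemy installation.
def _demo_column_clause_usage():
    from sqlalchemy import column, select, table

    user = table("user", column("id"), column("name"))
    stmt = select(user.c.id, user.c.name).where(user.c.name == "spongebob")
    # renders roughly: SELECT "user".id, "user".name FROM "user" WHERE ...
    return str(stmt)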
class TableValuedColumn(NamedColumn):
__visit_name__ = "table_valued_column"
_traverse_internals = [
("name", InternalTraversal.dp_anon_name),
("type", InternalTraversal.dp_type),
("scalar_alias", InternalTraversal.dp_clauseelement),
]
def __init__(self, scalar_alias, type_):
self.scalar_alias = scalar_alias
self.key = self.name = scalar_alias.name
self.type = type_
def _copy_internals(self, clone=_clone, **kw):
self.scalar_alias = clone(self.scalar_alias, **kw)
self.key = self.name = self.scalar_alias.name
@property
def _from_objects(self):
return [self.scalar_alias]
class CollationClause(ColumnElement):
__visit_name__ = "collation"
_traverse_internals = [("collation", InternalTraversal.dp_string)]
@classmethod
def _create_collation_expression(cls, expression, collation):
expr = coercions.expect(roles.ExpressionElementRole, expression)
return BinaryExpression(
expr,
CollationClause(collation),
operators.collate,
type_=expression.type,
)
def __init__(self, collation):
self.collation = collation
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = "identified"
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = "savepoint"
inherit_cache = False
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = "rollback_to_savepoint"
inherit_cache = False
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = "release_savepoint"
inherit_cache = False
class quoted_name(util.MemoizedSlots, str):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`_schema.Table`, :class:`_schema.Column`, and others.
The class can also be
passed explicitly as the name to any function that receives a name which
    can be quoted, such as when using the :meth:`_engine.Engine.has_table`
    method with an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
    The above will run the "has table" check against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
.. versionchanged:: 1.2 The :class:`.quoted_name` construct is now
importable from ``sqlalchemy.sql``, in addition to the previous
location of ``sqlalchemy.sql.elements``.
"""
__slots__ = "quote", "lower", "upper"
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (str(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return str(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return str(self).upper()
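# Illustrative sketch, not part of the original module: the quote flag's
# effect on case folding, using the quoted_name class defined above.
def _demo_quoted_name():
    qn = quoted_name("some_table", True)
    assert qn.lower() is qn  # quote=True: name survives "name normalize"
    plain = quoted_name("Some_Table", None)
    assert plain.lower() == "some_table"  # quote=None: plain str folding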
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {"column": cols.add})
return cols
def _type_from_args(args):
    # return the type of the first argument whose type is non-NULL;
    # the for/else falls through to NULLTYPE when none is found
    for a in args:
        if not a.type._isnull:
            return a.type
    else:
        return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(
column, require_embedded=require_embedded
)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
% (column, getattr(column, "table", None), fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
for attr in (
"comparator",
"_proxy_key",
"_tq_key_label",
"_tq_label",
"_non_anon_label",
):
self.__dict__.pop(attr, None)
for attr in ("name", "key", "table"):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
clone.__dict__.pop("comparator", None)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def _anon_name_label(self):
return self._Annotated__element._anon_name_label
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
__slots__ = ()
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (str(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`_schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`_schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
__slots__ = ()
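# Illustrative sketch, not part of the original module: marking a
# constraint name as already convention-converted, per the conv docstring
# above; assumes a standard SQLAlchemy installation.
def _demo_conv_usage():
    from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table

    m = MetaData(
        naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
    )
    t = Table("t", m, Column("x", Integer),
              CheckConstraint("x > 5", name=conv("ck_t_x5")))
    ck = next(c for c in t.constraints if isinstance(c, CheckConstraint))
    # the conv marker keeps DDL from re-applying the convention
    assert isinstance(ck.name, conv) and str(ck.name) == "ck_t_x5"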
_NONE_NAME = util.symbol("NONE_NAME")
"""indicate a 'deferred' name that was ultimately the value None."""
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
__slots__ = ()
@classmethod
def safe_construct(
cls, seed, body, enclosing_label=None, sanitize_key=False
) -> "_anonymous_label":
if sanitize_key:
body = re.sub(r"[%\(\) \$]+", "_", body).strip("_")
label = "%%(%d %s)s" % (seed, body.replace("%", "%%"))
if enclosing_label:
label = "%s%s" % (enclosing_label, label)
return _anonymous_label(label)
def __add__(self, other):
if "%" in other and not isinstance(other, _anonymous_label):
other = str(other).replace("%", "%%")
else:
other = str(other)
return _anonymous_label(
quoted_name(
str.__add__(self, other),
self.quote,
)
)
def __radd__(self, other):
if "%" in other and not isinstance(other, _anonymous_label):
other = str(other).replace("%", "%%")
else:
other = str(other)
return _anonymous_label(
quoted_name(
str.__add__(other, self),
self.quote,
)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
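# Illustrative sketch, not part of the original module: how safe_construct
# embeds the seed/body in a "%(...)s" map-substitution token, and how
# apply_map() later interpolates the final anonymized name.
def _demo_anonymous_label():
    lbl = _anonymous_label.safe_construct(1234, "mycol")
    assert str(lbl) == "%(1234 mycol)s"
    assert lbl.apply_map({"1234 mycol": "mycol_1"}) == "mycol_1"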
| sqlalchemy/sqlalchemy | lib/sqlalchemy/sql/elements.py | Python | mit | 136,961 | ["VisIt"] | 1c15d08e4fbb12c30e31fee6327eb26303dc4ed279a00cdc5f9636e90b38e427 |
import plumed
ib=plumed.InputBuilder()
try:
    import MDAnalysis
    _HAS_MDANALYSIS=True
except ImportError:
    _HAS_MDANALYSIS=False
def check(s1,s2):
if s1 != s2:
raise RuntimeError (s1,s2)
def checkfiles(f1,f2):
import filecmp
import difflib
import sys
if not filecmp.cmp(f1,f2):
s1=[]
with open(f1,"r") as file1:
for l in file1:
s1.append(l)
s2=[]
with open(f2,"r") as file2:
for l in file2:
s2.append(l)
for line in difflib.context_diff(s1, s2, fromfile=f1, tofile=f2):
sys.stdout.write(line)
raise RuntimeError("Files are different")
def test1():
check(ib.TORSION("phi",ATOMS="5,7,9,15") , 'phi: TORSION ATOMS=5,7,9,15\n')
def test1b():
check(ib.TORSION__("phi",ATOMS="5,7,9,15") , 'phi: TORSION ATOMS=5,7,9,15\n')
def test2():
check(ib.TORSION("phi",ATOMS="5 7 9 15") , 'phi: TORSION ATOMS={5 7 9 15}\n')
def test3():
check(ib.COORDINATION(GROUPA="1-10",GROUPB="11-20",SWITCH="RATIONAL NN=6 R_0=1") , 'COORDINATION GROUPA=1-10 GROUPB=11-20 SWITCH={RATIONAL NN=6 R_0=1}\n')
def test4():
check(ib.COORDINATION(GROUPA="""
1 2 3 4 5 6 7 8 9 10
""",GROUPB="""
11 12 13 14 15 16 17 18 19 20
""",SWITCH="""
RATIONAL NN=6 R_0=1
""") , 'COORDINATION GROUPA={ 1 2 3 4 5 6 7 8 9 10 } GROUPB={ 11 12 13 14 15 16 17 18 19 20 } SWITCH={ RATIONAL NN=6 R_0=1 }\n')
def test5():
check(ib.DISTANCE("d",ATOMS="11 21",NOPBC=True) , 'd: DISTANCE ATOMS={11 21} NOPBC\n')
def test6():
check(ib.DISTANCE("d",ATOMS="11 21",NOPBC=False),'d: DISTANCE ATOMS={11 21}\n')
def test7():
check(ib.METAD(ARG="phi psi",PACE=500,HEIGHT=1.2,SIGMA="0.35 0.35",FILE="HILLS"),'METAD ARG={phi psi} FILE=HILLS HEIGHT=1.2 PACE=500 SIGMA={0.35 0.35}\n')
def test8():
check(ib.GROUP("g",ATOMS=range(1,101)),'g: GROUP ATOMS={1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100}\n')
def test9():
check(ib.METAD(ARG=("phi","psi"),PACE=500,HEIGHT=1.2,SIGMA=(0.35,"pi/8"),FILE="HILLS"),'METAD ARG={phi psi} FILE=HILLS HEIGHT=1.2 PACE=500 SIGMA={0.35 pi/8}\n')
def test10():
check(ib.MOVINGRESTRAINT(ARG="d1",KAPPA0=0,KAPPA1=10.0,AT0=20,AT1=20,STEP0=1,STEP1=10000),'MOVINGRESTRAINT ARG=d1 AT0=20 AT1=20 KAPPA0=0 KAPPA1=10.0 STEP0=1 STEP1=10000\n')
def test11():
check(
ib.MOVINGRESTRAINT(ARG="d1",KAPPA=ib.numbered([0,10.0]),AT=ib.numbered([20,20]),STEP=ib.numbered([1,10000]))
,
'MOVINGRESTRAINT ARG=d1 AT0=20 AT1=20 KAPPA0=0 KAPPA1=10.0 STEP0=1 STEP1=10000\n'
)
def test12():
check(
ib.MOVINGRESTRAINT(ARG="d1",KAPPA=ib.numbered([100]),AT=ib.numbered({0:0.0,2:10.0}),STEP=ib.numbered((0,10,20,30)))
,
'MOVINGRESTRAINT ARG=d1 AT0=0.0 AT2=10.0 KAPPA0=100 STEP0=0 STEP1=10 STEP2=20 STEP3=30\n'
)
def test13():
check(
ib.MOVINGRESTRAINT(ARG="d1,d2",
KAPPA=ib.numbered([11]),
AT=ib.numbered(((0.0,1.0),(2.0,3.0))),
STEP=ib.numbered((0,100)))
,
'MOVINGRESTRAINT ARG=d1,d2 AT0={0.0 1.0} AT1={2.0 3.0} KAPPA0=11 STEP0=0 STEP1=100\n'
)
def test14():
check(
ib.MOVINGRESTRAINT(ARG="d1,d2",
KAPPA=ib.numbered([100]),AT=ib.numbered(([0.0,"pi"],[2.0,"pi"])),
STEP=ib.numbered((0,100)))
,
'MOVINGRESTRAINT ARG=d1,d2 AT0={0.0 pi} AT1={2.0 pi} KAPPA0=100 STEP0=0 STEP1=100\n'
)
def test15():
check(
ib.RESTRAINT(ARG="d1",KAPPA=10,AT=ib.replicas((0.0,1.0,2.0,3.0)))
,
'RESTRAINT ARG=d1 AT=@replicas:{0.0 1.0 2.0 3.0} KAPPA=10\n'
)
def test16():
try:
import numpy
    except ImportError:
        print("This test requires the numpy module to be installed.")
        return
check(
ib.RESTRAINT(ARG="d1",KAPPA=10,AT=ib.replicas(numpy.linspace(3.0,5.0,17)))
,
'RESTRAINT ARG=d1 AT=@replicas:{3.0 3.125 3.25 3.375 3.5 3.625 3.75 3.875 4.0 4.125 4.25 4.375 4.5 4.625 4.75 4.875 5.0} KAPPA=10\n'
)
def test17():
check(
ib.RESTRAINT(ARG="d1,d2",KAPPA=(10,10),AT=ib.replicas(([0.0,1.0],[10.0,11.0])))
,
'RESTRAINT ARG=d1,d2 AT=@replicas:{{0.0 1.0} {10.0 11.0}} KAPPA={10 10}\n'
)
def test18():
ib1=plumed.InputBuilder(comma_separator=True)
check(
ib1.GROUP("g1",ATOMS=[1,2,3,4,5,6,7,8,9,10])
,
'g1: GROUP ATOMS=1,2,3,4,5,6,7,8,9,10\n'
)
def test19():
check(ib.at.phi(6),'@phi-6')
def test20():
check(ib.at.phi(range(1,11),"A"),'@phi-A1 @phi-A2 @phi-A3 @phi-A4 @phi-A5 @phi-A6 @phi-A7 @phi-A8 @phi-A9 @phi-A10')
def test20b():
check(ib.at.phi(range(1,11),["A","B"]),'@phi-A1 @phi-A2 @phi-A3 @phi-A4 @phi-A5 @phi-A6 @phi-A7 @phi-A8 @phi-A9 @phi-A10 @phi-B1 @phi-B2 @phi-B3 @phi-B4 @phi-B5 @phi-B6 @phi-B7 @phi-B8 @phi-B9 @phi-B10')
def test21():
check(ib.at.phi(4,[1,2]),'@phi-1_4 @phi-2_4')
def test22():
check(ib.at("OW",range(20,40)),'@OW-20 @OW-21 @OW-22 @OW-23 @OW-24 @OW-25 @OW-26 @OW-27 @OW-28 @OW-29 @OW-30 @OW-31 @OW-32 @OW-33 @OW-34 @OW-35 @OW-36 @OW-37 @OW-38 @OW-39')
def test23():
check(ib.at.mdatoms,'@mdatoms')
def test24():
check(ib.RESTRAINT(ARG="d1,d2",verbatim="AT={10 20} KAPPA={5 6}"),'RESTRAINT ARG=d1,d2 AT={10 20} KAPPA={5 6}\n')
def test25():
check(ib.verbatim("# here is a comment"),'# here is a comment\n')
if _HAS_MDANALYSIS:
def test_mdanalysis():
u=MDAnalysis.Universe("test/ref.pdb")
check(
ib.GROUP(ATOMS=u.select_atoms("name C2 C4 C6")),
'GROUP ATOMS={16 20 25 50 54 59 84 88 93 118 122 127 147 151 156 178 182 187 209 213 218 240 244 249}\n'
)
check(
ib.GROUP(ATOMS=u.select_atoms("name C2","name C4","name C6")),
'GROUP ATOMS={20 54 88 122 156 187 218 249 25 59 93 127 151 182 213 244 16 50 84 118 147 178 209 240}\n'
)
| PabloPiaggi/plumed2 | python/test/test_input_builder.py | Python | lgpl-3.0 | 5,836 | ["MDAnalysis"] | 8c32d0847390da4c026c644ffb26851b910c7ce82edef67e60595f028f950801 |
#! /usr/bin/env python
########################################################################
# File : dirac-stager-show-stats
# Author : Daniela Remenska
########################################################################
"""
Reports breakdown of file(s) number/size in different staging states across Storage Elements.
Currently used Cache per SE is also reported. (active pins)
Example:
$ dirac-stager-show-stats
Status SE NumberOfFiles Size(GB)
--------------------------------------------------------------------------
Staged GRIDKA-RDST 1 4.5535
StageSubmitted GRIDKA-RDST 5 22.586
Waiting PIC-RDST 3 13.6478
WARNING: the Size for files with Status=New is not yet determined at the point of selection!
--------------------- current status of the SE Caches from the DB-----------
GRIDKA-RDST : 6 replicas with a size of 29.141 GB.
"""
from DIRAC.Core.Base.Script import Script
from DIRAC import gLogger, exit as DIRACExit
@Script()
def main():
Script.parseCommandLine(ignoreErrors=False)
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
client = StorageManagerClient()
res = client.getCacheReplicasSummary()
if not res["OK"]:
gLogger.fatal(res["Message"])
DIRACExit(2)
stagerInfo = res["Value"]
outStr = "\n"
outStr += " %s" % ("Status".ljust(20))
outStr += " %s" % ("SE".ljust(20))
outStr += " %s" % ("NumberOfFiles".ljust(20))
outStr += " %s" % ("Size(GB)".ljust(20))
outStr += " \n--------------------------------------------------------------------------\n"
if stagerInfo:
for info in stagerInfo.values():
outStr += " %s" % (info["Status"].ljust(20))
outStr += " %s" % (info["SE"].ljust(20))
outStr += " %s" % (str(info["NumFiles"]).ljust(20))
outStr += " %s\n" % (str(info["SumFiles"]).ljust(20))
else:
outStr += " %s" % ("Nothing to see here...Bye")
outStr += " \nWARNING: the Size for files with Status=New is not yet determined at the point of selection!\n"
outStr += "--------------------- current status of the SE Caches from the DB-----------"
res = client.getSubmittedStagePins()
if not res["OK"]:
gLogger.fatal(res["Message"])
DIRACExit(2)
storageElementUsage = res["Value"]
if storageElementUsage:
        for storageElement, seDict in storageElementUsage.items():
            seDict["TotalSize"] = seDict["TotalSize"] / (1000 * 1000 * 1000.0)
outStr += " \n %s: %s replicas with a size of %.3f GB." % (
storageElement.ljust(15),
str(seDict["Replicas"]).rjust(6),
seDict["TotalSize"],
)
else:
outStr += " %s" % "\nStageRequest.getStorageUsage: No active stage/pin requests found."
gLogger.notice(outStr)
DIRACExit(0)
if __name__ == "__main__":
main()
| DIRACGrid/DIRAC | src/DIRAC/StorageManagementSystem/scripts/dirac_stager_show_stats.py | Python | gpl-3.0 | 3,161 | ["DIRAC"] | 04890c6270c95d2cb9d9ca7789636ecbd2290d8866c9576b2a699da4ba19397c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to be used to compute Pourbaix diagrams
of arbitrary compositions and formation energies. If you use
this module in your work, please consider citing the following:
General formalism for solid-aqueous equilibria from DFT:
Persson et al., DOI: 10.1103/PhysRevB.85.235438
Decomposition maps, or Pourbaix hull diagrams
Singh et al., DOI: 10.1021/acs.chemmater.7b03980
Fast computation of many-element Pourbaix diagrams:
Patel et al., https://arxiv.org/abs/1909.00035 (submitted)
"""
import logging
import numpy as np
import itertools
import re
from copy import deepcopy
from functools import cmp_to_key, partial, lru_cache
from monty.json import MSONable, MontyDecoder
from multiprocessing import Pool
import warnings
from scipy.spatial import ConvexHull, HalfspaceIntersection
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from pymatgen.util.coord import Simplex
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.compatibility import MU_H2O
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.analysis.phase_diagram import PhaseDiagram, PDEntry
from tqdm import tqdm
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.4"
__maintainer__ = "Joseph Montoya"
__credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel"
__email__ = "joseph.montoya@tri.global"
__status__ = "Production"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
PREFAC = 0.0591
# TODO: Revise to more closely reflect PDEntry, invoke from energy/composition
# TODO: PourbaixEntries depend implicitly on having entry energies be
# formation energies, should be a better way to get from raw energies
# TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename
class PourbaixEntry(MSONable):
"""
An object encompassing all data relevant to a solid or ion
in a pourbaix diagram. Each bulk solid/ion has an energy
    g of the form: g = g0 + 0.0591 * log10(conc) - nO * mu_H2O
    + (nH - 2nO) * pH + phi * (-nH + 2nO + q)
Note that the energies corresponding to the input entries
should be formation energies with respect to hydrogen and
oxygen gas in order for the pourbaix diagram formalism to
work. This may be changed to be more flexible in the future.
"""
def __init__(self, entry, entry_id=None, concentration=1e-6):
"""
Args:
entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An
entry object
            entry_id (str): optional identifier for the entry
            concentration (float): ion concentration; only used for
                ion entries, defaults to 1e-6
"""
self.entry = entry
if isinstance(entry, IonEntry):
self.concentration = concentration
self.phase_type = "Ion"
self.charge = entry.ion.charge
else:
self.concentration = 1.0
self.phase_type = "Solid"
self.charge = 0.0
self.uncorrected_energy = entry.energy
if entry_id is not None:
self.entry_id = entry_id
elif hasattr(entry, "entry_id") and entry.entry_id:
self.entry_id = entry.entry_id
else:
self.entry_id = None
@property
def npH(self):
"""
        Returns: number of H minus twice the number of O in the composition.
"""
return self.entry.composition.get("H", 0.) - 2 * self.entry.composition.get("O", 0.)
@property
def nH2O(self):
"""
Returns: Number of H2O.
"""
return self.entry.composition.get("O", 0.)
@property
def nPhi(self):
"""
        Returns: nPhi, the coefficient of the potential V in the energy
        expression (equal to npH minus the charge).
"""
return self.npH - self.charge
@property
def name(self):
"""
Returns: Name for entry
"""
if self.phase_type == "Solid":
return self.entry.composition.reduced_formula + "(s)"
elif self.phase_type == "Ion":
return self.entry.name
@property
def energy(self):
"""
        Returns (float): total energy of the pourbaix
            entry (at pH, V = 0 vs. SHE)
"""
# Note: this implicitly depends on formation energies as input
return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O)
@property
def energy_per_atom(self):
"""
energy per atom of the pourbaix entry
Returns (float): energy per atom
"""
return self.energy / self.composition.num_atoms
def energy_at_conditions(self, pH, V):
"""
Get free energy for a given pH and V
Args:
pH (float): pH at which to evaluate free energy
V (float): voltage at which to evaluate free energy
Returns:
free energy at conditions
"""
return self.energy + self.npH * PREFAC * pH + self.nPhi * V
def get_element_fraction(self, element):
"""
Gets the elemental fraction of a given non-OH element
Args:
element (Element or str): string or element corresponding
to element to get from composition
Returns:
fraction of element / sum(all non-OH elements)
"""
return self.composition.get(element) * self.normalization_factor
@property
def normalized_energy(self):
"""
Returns:
energy normalized by number of non H or O atoms, e. g.
for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4
"""
return self.energy * self.normalization_factor
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor
@property
def conc_term(self):
"""
Returns the concentration contribution to the free energy,
and should only be present when there are ions in the entry
"""
return PREFAC * np.log10(self.concentration)
# TODO: not sure if these are strictly necessary with refactor
def as_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
if isinstance(self.entry, IonEntry):
d["entry_type"] = "Ion"
else:
d["entry_type"] = "Solid"
d["entry"] = self.entry.as_dict()
d["concentration"] = self.concentration
d["entry_id"] = self.entry_id
return d
@classmethod
def from_dict(cls, d):
"""
        Returns a PourbaixEntry reconstructed from a dict representation.
"""
entry_type = d["entry_type"]
if entry_type == "Ion":
entry = IonEntry.from_dict(d["entry"])
else:
entry = PDEntry.from_dict(d["entry"])
entry_id = d["entry_id"]
concentration = d["concentration"]
return PourbaixEntry(entry, entry_id, concentration)
@property
def normalization_factor(self):
"""
Sum of number of atoms minus the number of H and O in composition
"""
return 1.0 / (self.num_atoms - self.composition.get('H', 0)
- self.composition.get('O', 0))
@property
def composition(self):
"""
Returns composition
"""
return self.entry.composition
@property
def num_atoms(self):
"""
Return number of atoms in current formula. Useful for normalization
"""
return self.composition.num_atoms
def __repr__(self):
return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format(
self.entry.composition, self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id)
def __str__(self):
return self.__repr__()
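# Illustrative sketch, not part of the original module: evaluating the
# docstring's free-energy expression for a hypothetical Zn[2+] ion entry.
# The -0.5 eV formation energy is a made-up placeholder value.
def _demo_pourbaix_entry():
    ion_entry = IonEntry(Ion.from_formula("Zn[2+]"), -0.5)
    pbx_entry = PourbaixEntry(ion_entry)  # default concentration 1e-6
    # g(pH, V) = g0 + conc_term + PREFAC * npH * pH + nPhi * V
    return pbx_entry.energy_at_conditions(7.0, 0.5)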
class MultiEntry(PourbaixEntry):
"""
PourbaixEntry-like object for constructing multi-elemental Pourbaix
diagrams.
"""
def __init__(self, entry_list, weights=None):
"""
Initializes a MultiEntry.
Args:
entry_list ([PourbaixEntry]): List of component PourbaixEntries
weights ([float]): Weights associated with each entry. Default is None
"""
if weights is None:
self.weights = [1.0] * len(entry_list)
else:
self.weights = weights
self.entry_list = entry_list
@lru_cache()
def __getattr__(self, item):
"""
Because most of the attributes here are just weighted
averages of the entry_list, we save some space by
having a set of conditionals to define the attributes
"""
# Attributes that are weighted averages of entry attributes
if item in ["energy", "npH", "nH2O", "nPhi", "conc_term",
"composition", "uncorrected_energy"]:
# TODO: Composition could be changed for compat with sum
if item == "composition":
start = Composition({})
else:
start = 0
return sum([getattr(e, item) * w
for e, w in zip(self.entry_list, self.weights)], start)
# Attributes that are just lists of entry attributes
elif item in ["entry_id", "phase_type"]:
return [getattr(e, item) for e in self.entry_list]
# normalization_factor, num_atoms should work from superclass
return self.__getattribute__(item)
@property
def name(self):
"""
MultiEntry name, i. e. the name of each entry joined by ' + '
"""
return " + ".join([e.name for e in self.entry_list])
def __repr__(self):
return "Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {}, species: {}" \
.format(self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
Returns: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry_list": [e.as_dict() for e in self.entry_list],
"weights": self.weights}
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation
Returns:
MultiEntry
"""
entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")]
return cls(entry_list, d.get("weights"))
# TODO: this class isn't particularly useful in its current form, could be
# refactored to include information about the reference solid
class IonEntry(PDEntry):
"""
Object similar to PDEntry, but contains an Ion object instead of a
Composition object.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
"""
def __init__(self, ion, energy, name=None, attribute=None):
"""
Args:
ion: Ion object
energy: Energy for composition.
name: Optional parameter to name the entry. Defaults to the
chemical formula.
"""
self.ion = ion
# Auto-assign name
name = name if name else self.ion.reduced_formula
super(IonEntry, self).__init__(
composition=ion.composition, energy=energy, name=name,
attribute=attribute)
@classmethod
def from_dict(cls, d):
"""
Returns an IonEntry object from a dict.
"""
return IonEntry(Ion.from_dict(d["ion"]), d["energy"], d.get("name"),
d.get("attribute"))
def as_dict(self):
"""
Creates a dict of composition, energy, and ion name
"""
d = {"ion": self.ion.as_dict(), "energy": self.energy,
"name": self.name}
return d
def __repr__(self):
return "IonEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def ion_or_solid_comp_object(formula):
"""
Returns either an ion object or composition object given
a formula.
Args:
formula: String formula. Eg. of ion: NaOH(aq), Na[+];
Eg. of solid: Fe2O3(s), Fe(s), Na2O
Returns:
Composition/Ion object
"""
m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula)
if m:
comp_obj = Ion.from_formula(formula)
elif re.search(r"\(s\)", formula):
comp_obj = Composition(formula[:-3])
else:
comp_obj = Composition(formula)
return comp_obj
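# Illustrative sketch, not part of the original module: the three formula
# shapes recognized by ion_or_solid_comp_object().
def _demo_ion_or_solid():
    assert isinstance(ion_or_solid_comp_object("Na[+]"), Ion)
    solid = ion_or_solid_comp_object("Fe2O3(s)")  # "(s)" suffix stripped
    assert isinstance(solid, Composition) and not isinstance(solid, Ion)
    assert isinstance(ion_or_solid_comp_object("Na2O"), Composition)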
ELEMENTS_HO = {Element('H'), Element('O')}
# TODO: the solids filter breaks some of the functionality of the
# heatmap plotter, because the reference states for decomposition
# don't include oxygen/hydrogen in the OER/HER regions
# TODO: create a from_phase_diagram class method for non-formation energy
# invocation
# TODO: invocation from a MultiEntry entry list could be a bit more robust
# TODO: serialization is still a bit rough around the edges
class PourbaixDiagram(MSONable):
"""
Class to create a Pourbaix diagram from entries
"""
def __init__(self, entries, comp_dict=None, conc_dict=None,
filter_solids=False, nproc=None):
"""
Args:
entries ([PourbaixEntry] or [MultiEntry]): Entries list
containing Solids and Ions or a list of MultiEntries
comp_dict ({str: float}): Dictionary of compositions,
defaults to equal parts of each elements
conc_dict ({str: float}): Dictionary of ion concentrations,
defaults to 1e-6 for each element
filter_solids (bool): applying this filter to a pourbaix
diagram ensures all included phases are filtered by
stability on the compositional phase diagram. This
breaks some of the functionality of the analysis,
though, so use with caution.
nproc (int): number of processes to generate multientries with
in parallel. Defaults to None (serial processing)
"""
entries = deepcopy(entries)
# Get non-OH elements
self.pbx_elts = set(itertools.chain.from_iterable(
[entry.composition.elements for entry in entries]))
self.pbx_elts = list(self.pbx_elts - ELEMENTS_HO)
self.dim = len(self.pbx_elts) - 1
# Process multientry inputs
if isinstance(entries[0], MultiEntry):
self._processed_entries = entries
# Extract individual entries
single_entries = list(set(itertools.chain.from_iterable(
[e.entry_list for e in entries])))
self._unprocessed_entries = single_entries
self._filtered_entries = single_entries
self._conc_dict = None
self._elt_comp = {k: v for k, v in entries[0].composition.items()
if k not in ELEMENTS_HO}
self._multielement = True
# Process single entry inputs
else:
# Set default conc/comp dicts
if not comp_dict:
comp_dict = {elt.symbol: 1. / len(self.pbx_elts) for elt in self.pbx_elts}
if not conc_dict:
conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts}
self._conc_dict = conc_dict
self._elt_comp = comp_dict
self.pourbaix_elements = self.pbx_elts
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
# If a conc_dict is specified, override individual entry concentrations
for entry in ion_entries:
ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO)
# TODO: the logic here for ion concentration setting is in two
# places, in PourbaixEntry and here, should be consolidated
if len(ion_elts) == 1:
entry.concentration = conc_dict[ion_elts[0].symbol] \
* entry.normalization_factor
elif len(ion_elts) > 1 and not entry.concentration:
raise ValueError("Elemental concentration not compatible "
"with multi-element ions")
self._unprocessed_entries = solid_entries + ion_entries
if not len(solid_entries + ion_entries) == len(entries):
raise ValueError("All supplied entries must have a phase type of "
"either \"Solid\" or \"Ion\"")
if filter_solids:
            # O is 2.46 because the pbx entry finds energies referenced to H2O
entries_HO = [ComputedEntry('H', 0), ComputedEntry('O', 2.46)]
solid_pd = PhaseDiagram(solid_entries + entries_HO)
solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
self._filtered_entries = solid_entries + ion_entries
if len(comp_dict) > 1:
self._multielement = True
self._processed_entries = self._preprocess_pourbaix_entries(
self._filtered_entries, nproc=nproc)
else:
self._processed_entries = self._filtered_entries
self._multielement = False
self._stable_domains, self._stable_domain_vertices = \
self.get_pourbaix_domains(self._processed_entries)
def _convert_entries_to_points(self, pourbaix_entries):
"""
Args:
pourbaix_entries ([PourbaixEntry]): list of pourbaix entries
to process into vectors in nph-nphi-composition space
Returns:
list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]]
corresponding to each entry in nph-nphi-composition space
"""
vecs = [[entry.npH, entry.nPhi, entry.energy] +
[entry.composition.get(elt) for elt in self.pbx_elts[:-1]]
for entry in pourbaix_entries]
vecs = np.array(vecs)
norms = np.transpose([[entry.normalization_factor
for entry in pourbaix_entries]])
vecs *= norms
return vecs
def _get_hull_in_nph_nphi_space(self, entries):
"""
Generates convex hull of pourbaix diagram entries in composition,
npH, and nphi space. This enables filtering of multi-entries
such that only compositionally stable combinations of entries
are included.
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to construct
the convex hull
Returns: list of entries and stable facets corresponding to that
list of entries
"""
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
# Pre-filter solids based on min at each composition
logger.debug("Pre-filtering solids by min energy at each composition")
sorted_entries = sorted(
solid_entries, key=lambda x: (x.composition.reduced_composition,
x.entry.energy_per_atom))
grouped_by_composition = itertools.groupby(
sorted_entries, key=lambda x: x.composition.reduced_composition)
min_entries = [list(grouped_entries)[0]
for comp, grouped_entries in grouped_by_composition]
min_entries += ion_entries
logger.debug("Constructing nph-nphi-composition points for qhull")
vecs = self._convert_entries_to_points(min_entries)
maxes = np.max(vecs[:, :3], axis=0)
extra_point = np.concatenate(
[maxes, np.ones(self.dim) / self.dim], axis=0)
# Add padding for extra point
pad = 1000
extra_point[2] += pad
points = np.concatenate([vecs, np.array([extra_point])], axis=0)
logger.debug("Constructing convex hull in nph-nphi-composition space")
hull = ConvexHull(points, qhull_options="QJ i")
# Create facets and remove top
facets = [facet for facet in hull.simplices
if not len(points) - 1 in facet]
if self.dim > 1:
logger.debug("Filtering facets by pourbaix composition")
valid_facets = []
for facet in facets:
comps = vecs[facet][:, 3:]
full_comps = np.concatenate([
comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1)
                # Ensure a compositional interior point exists in the simplex
if np.linalg.matrix_rank(full_comps) > self.dim:
valid_facets.append(facet)
else:
valid_facets = facets
return min_entries, valid_facets
def _preprocess_pourbaix_entries(self, entries, nproc=None):
"""
Generates multi-entries for pourbaix diagram
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to preprocess
into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
Returns:
([MultiEntry]) list of stable MultiEntry candidates
"""
# Get composition
tot_comp = Composition(self._elt_comp)
min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries)
combos = []
for facet in valid_facets:
for i in range(1, self.dim + 2):
these_combos = list()
for combo in itertools.combinations(facet, i):
these_entries = [min_entries[i] for i in combo]
these_combos.append(frozenset(these_entries))
combos.append(these_combos)
        all_combos = [
            list(combo)
            for combo in set(itertools.chain.from_iterable(combos))
        ]
multi_entries = []
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=tot_comp)
with Pool(nproc) as p:
multi_entries = list(tqdm(p.imap(f, all_combos),
total=len(all_combos)))
multi_entries = list(filter(bool, multi_entries))
else:
# Serial processing of multi-entry generation
for combo in tqdm(all_combos):
multi_entry = self.process_multientry(combo, prod_comp=tot_comp)
if multi_entry:
multi_entries.append(multi_entry)
return multi_entries
def _generate_multielement_entries(self, entries, nproc=None):
"""
Create entries for multi-element Pourbaix construction.
This works by finding all possible linear combinations
of entries that can result in the specified composition
from the initialized comp_dict.
Args:
entries ([PourbaixEntries]): list of pourbaix entries
to process into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
"""
N = len(self._elt_comp) # No. of elements
total_comp = Composition(self._elt_comp)
# generate all combinations of compounds that have all elements
entry_combos = [itertools.combinations(
entries, j + 1) for j in range(N)]
entry_combos = itertools.chain.from_iterable(entry_combos)
entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition,
entry_combos)
# Generate and filter entries
processed_entries = []
total = sum([comb(len(entries), j + 1)
for j in range(N)])
if total > 1e6:
warnings.warn("Your pourbaix diagram includes {} entries and may "
"take a long time to generate.".format(total))
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=total_comp)
with Pool(nproc) as p:
processed_entries = list(tqdm(p.imap(f, entry_combos),
total=total))
processed_entries = list(filter(bool, processed_entries))
# Serial processing of multi-entry generation
else:
for entry_combo in entry_combos:
processed_entry = self.process_multientry(entry_combo, total_comp)
if processed_entry is not None:
processed_entries.append(processed_entry)
return processed_entries
@staticmethod
def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
"""
Static method for finding a multientry based on
a list of entries and a product composition.
Essentially checks to see if a valid aqueous
reaction exists between the entries and the
product composition and returns a MultiEntry
with weights according to the coefficients if so.
Args:
entry_list ([Entry]): list of entries from which to
create a MultiEntry
prod_comp (Composition): composition constraint for setting
weights of MultiEntry
coeff_threshold (float): threshold of stoichiometric
coefficients to filter, if weights are lower than
this value, the entry is not returned
"""
dummy_oh = [Composition("H"), Composition("O")]
try:
            # Get balanced reaction coeffs, ensuring all are above the threshold
# Note that we get reduced compositions for solids and non-reduced
# compositions for ions because ions aren't normalized due to
# their charge state.
entry_comps = [e.composition for e in entry_list]
rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
react_coeffs = [-rxn.get_coeff(comp) for comp in entry_comps]
all_coeffs = react_coeffs + [rxn.get_coeff(prod_comp)]
# Check if reaction coeff threshold met for pourbaix compounds
# All reactant/product coefficients must be positive nonzero
if all([coeff > coeff_threshold for coeff in all_coeffs]):
return MultiEntry(entry_list, weights=react_coeffs)
else:
return None
except ReactionError:
return None
@staticmethod
def get_pourbaix_domains(pourbaix_entries, limits=None):
"""
Returns a set of pourbaix stable domains (i. e. polygons) in
pH-V space from a list of pourbaix_entries
This function works by using scipy's HalfspaceIntersection
function to construct all of the 2-D polygons that form the
boundaries of the planes corresponding to individual entry
gibbs free energies as a function of pH and V. Hyperplanes
of the form a*pH + b*V + 1 - g(0, 0) are constructed and
supplied to HalfspaceIntersection, which then finds the
boundaries of each pourbaix region using the intersection
points.
Args:
pourbaix_entries ([PourbaixEntry]): Pourbaix entries
with which to construct stable pourbaix domains
limits ([[float]]): limits in which to do the pourbaix
analysis
Returns:
Returns a dict of the form {entry: [boundary_points]}.
The list of boundary points are the sides of the N-1
dim polytope bounding the allowable ph-V range of each entry.
"""
if limits is None:
limits = [[-2, 16], [-4, 4]]
# Get hyperplanes
hyperplanes = [np.array([-PREFAC * entry.npH, -entry.nPhi,
0, -entry.energy]) * entry.normalization_factor
for entry in pourbaix_entries]
hyperplanes = np.array(hyperplanes)
hyperplanes[:, 2] = 1
max_contribs = np.max(np.abs(hyperplanes), axis=0)
g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
# Add border hyperplanes and generate HalfspaceIntersection
border_hyperplanes = [[-1, 0, 0, limits[0][0]],
[1, 0, 0, -limits[0][1]],
[0, -1, 0, limits[1][0]],
[0, 1, 0, -limits[1][1]],
[0, 0, -1, 2 * g_max]]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = np.average(limits, axis=1).tolist() + [g_max]
hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# organize the boundary points by entry
pourbaix_domains = {entry: [] for entry in pourbaix_entries}
for intersection, facet in zip(hs_int.intersections,
hs_int.dual_facets):
for v in facet:
if v < len(pourbaix_entries):
this_entry = pourbaix_entries[v]
pourbaix_domains[this_entry].append(intersection)
# Remove entries with no pourbaix region
pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
pourbaix_domain_vertices = {}
for entry, points in pourbaix_domains.items():
points = np.array(points)[:, :2]
# Initial sort to ensure consistency
points = points[np.lexsort(np.transpose(points))]
center = np.average(points, axis=0)
points_centered = points - center
# Sort points by cross product of centered points,
# isn't strictly necessary but useful for plotting tools
points_centered = sorted(points_centered,
key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
points = points_centered + center
# Create simplices corresponding to pourbaix boundary
simplices = [Simplex(points[indices])
for indices in ConvexHull(points).simplices]
pourbaix_domains[entry] = simplices
pourbaix_domain_vertices[entry] = points
return pourbaix_domains, pourbaix_domain_vertices
def find_stable_entry(self, pH, V):
"""
Finds stable entry at a pH,V condition
Args:
pH (float): pH to find stable entry
V (float): V to find stable entry
        Returns:
            (PourbaixEntry): the stable entry with minimum normalized
                energy at the given pH, V
"""
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)
for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entries in eV/atom,
supports vectorized inputs for pH and V
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float, [float]): pH at which to find the decomposition
V (float, [float]): voltage at which to find the decomposition
Returns:
Decomposition energy for the entry, i. e. the energy above
the "pourbaix hull" in eV/atom at the given conditions
"""
# Check composition consistency between entry and Pourbaix diagram:
pbx_comp = Composition(self._elt_comp).fractional_composition
entry_pbx_comp = Composition(
{elt: coeff for elt, coeff in entry.composition.items()
if elt not in ELEMENTS_HO}).fractional_composition
if entry_pbx_comp != pbx_comp:
raise ValueError("Composition of stability entry does not match "
"Pourbaix Diagram")
entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V)
hull_energy = self.get_hull_energy(pH, V)
decomposition_energy = entry_normalized_energy - hull_energy
# Convert to eV/atom instead of eV/normalized formula unit
decomposition_energy /= entry.normalization_factor
decomposition_energy /= entry.composition.num_atoms
return decomposition_energy
def get_hull_energy(self, pH, V):
"""
Gets the minimum energy of the pourbaix "basin" that is formed
from the stable pourbaix planes. Vectorized.
Args:
pH (float or [float]): pH at which to find the hull energy
V (float or [float]): V at which to find the hull energy
Returns:
(float or [float]) minimum pourbaix energy at conditions
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
base = np.min(all_gs, axis=0)
return base
def get_stable_entry(self, pH, V):
"""
Gets the stable entry at a given pH, V condition
Args:
pH (float): pH at a given condition
V (float): V at a given condition
Returns:
(PourbaixEntry or MultiEntry): pourbaix or multi-entry
            corresponding to the minimum energy entry at a given
pH, V condition
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
return self.stable_entries[np.argmin(all_gs)]
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_domains.keys())
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def all_entries(self):
"""
Return all entries used to generate the pourbaix diagram
"""
return self._processed_entries
@property
def unprocessed_entries(self):
"""
Return unprocessed entries
"""
return self._unprocessed_entries
def as_dict(self, include_unprocessed_entries=False):
"""
Args:
            include_unprocessed_entries (bool): whether to include unprocessed entries.
Returns:
MSONable dict.
"""
if include_unprocessed_entries:
entries = [e.as_dict() for e in self._unprocessed_entries]
else:
entries = [e.as_dict() for e in self._processed_entries]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": entries,
"comp_dict": self._elt_comp,
"conc_dict": self._conc_dict}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation.
Returns:
PourbaixDiagram
"""
decoded_entries = MontyDecoder().process_decoded(d['entries'])
return cls(decoded_entries, d.get('comp_dict'),
d.get('conc_dict'))
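# Illustrative sketch, not part of the original module: the halfspace
# machinery used by get_pourbaix_domains() in miniature. Two "entry" planes
# plus four border planes and an energy cap bound a 3-D region; the planes,
# values, and cap here are made-up toy numbers. Uses only the numpy/scipy
# objects already imported above.
def _demo_halfspace_intersection():
    # rows are [a, b, c, d] with a*pH + b*V + c*g + d <= 0
    halfspaces = np.array([
        [0.0, 0.0, -1.0, -1.0],   # g >= -1: a flat "entry" plane
        [0.1, 0.0, -1.0, -1.2],   # a second, pH-tilted entry plane
        [-1.0, 0.0, 0.0, -2.0],   # pH >= -2
        [1.0, 0.0, 0.0, -16.0],   # pH <= 16
        [0.0, -1.0, 0.0, -4.0],   # V >= -4
        [0.0, 1.0, 0.0, -4.0],    # V <= 4
        [0.0, 0.0, 1.0, -10.0],   # g <= 10: cap so the region is bounded
    ])
    hs = HalfspaceIntersection(halfspaces, np.array([7.0, 0.0, 0.0]))
    return hs.intersections  # vertices; dual_facets group them per plane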
class PourbaixPlotter:
"""
A plotter class for phase diagrams.
"""
def __init__(self, pourbaix_diagram):
"""
Args:
pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object.
"""
self._pbx = pourbaix_diagram
def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pbx._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b").draggable()
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
def plot_entry_stability(self, entry, pH_range=None, pH_resolution=100,
V_range=None, V_resolution=100, e_hull_max=1,
cmap='RdYlBu_r', **kwargs):
"""
        Plots the stability (decomposition energy) map of a single entry
        on top of the Pourbaix diagram.
        Args:
            entry (PourbaixEntry): entry for which to plot the stability map
            pH_range ([float]): pH range of the map, defaults to [-2, 16]
            pH_resolution (int): number of pH grid points
            V_range ([float]): voltage range of the map, defaults to [-3, 3]
            V_resolution (int): number of voltage grid points
            e_hull_max (float): upper bound for the stability colorbar
            cmap (str): matplotlib colormap name
            **kwargs: keyword args passed to get_pourbaix_plot
        Returns:
            plt (pyplot): matplotlib plot object with the stability map
"""
if pH_range is None:
pH_range = [-2, 16]
if V_range is None:
V_range = [-3, 3]
# plot the Pourbaix diagram
plt = self.get_pourbaix_plot(**kwargs)
pH, V = np.mgrid[pH_range[0]:pH_range[1]:pH_resolution * 1j, V_range[0]:V_range[1]:V_resolution * 1j]
stability = self._pbx.get_decomposition_energy(entry, pH, V)
# Plot stability map
plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max)
cbar = plt.colorbar()
cbar.set_label("Stability of {} (eV/atom)".format(
generate_entry_label(entry)))
# Set ticklabels
# ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()]
# ticklabels[-1] = '>={}'.format(ticklabels[-1])
# cbar.ax.set_yticklabels(ticklabels)
return plt
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
return self._pbx._stable_domain_vertices[entry]
def generate_entry_label(entry):
"""
Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for
"""
if isinstance(entry, MultiEntry):
return " + ".join([latexify_ion(latexify(e.name)) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name))
def latexify_ion(formula):
"""
Convert a formula to latex format.
Args:
formula (str): Formula
Returns:
Latex string.
"""
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
| mbkumar/pymatgen | pymatgen/analysis/pourbaix_diagram.py | Python | mit | 41,377 | ["pymatgen"] | e3ec5d252847d5a2372b5061da9cff326e5ad2d442647c2c153b6b9f20f31a62 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from .mh import MHMove
__all__ = ["GaussianMove"]
class GaussianMove(MHMove):
"""A Metropolis step with a Gaussian proposal function.
Args:
cov: The covariance of the proposal function. This can be a scalar,
vector, or matrix and the proposal will be assumed isotropic,
axis-aligned, or general respectively.
mode (Optional): Select the method used for updating parameters. This
can be one of ``"vector"``, ``"random"``, or ``"sequential"``. The
``"vector"`` mode updates all dimensions simultaneously,
``"random"`` randomly selects a dimension and only updates that
one, and ``"sequential"`` loops over dimensions and updates each
one in turn.
factor (Optional[float]): If provided the proposal will be made with a
standard deviation uniformly selected from the range
``exp(U(-log(factor), log(factor))) * cov``. This is invalid for
the ``"vector"`` mode.
Raises:
        ValueError: If the proposal dimensions are invalid or if any of
            the other arguments are inconsistent.
"""
def __init__(self, cov, mode="vector", factor=None):
# Parse the proposal type.
try:
float(cov)
except TypeError:
cov = np.atleast_1d(cov)
if len(cov.shape) == 1:
# A diagonal proposal was given.
ndim = len(cov)
proposal = _diagonal_proposal(np.sqrt(cov), factor, mode)
elif len(cov.shape) == 2 and cov.shape[0] == cov.shape[1]:
# The full, square covariance matrix was given.
ndim = cov.shape[0]
proposal = _proposal(cov, factor, mode)
else:
raise ValueError("Invalid proposal scale dimensions")
else:
# This was a scalar proposal.
ndim = None
proposal = _isotropic_proposal(np.sqrt(cov), factor, mode)
super(GaussianMove, self).__init__(proposal, ndim=ndim)
class _isotropic_proposal(object):
allowed_modes = ["vector", "random", "sequential"]
def __init__(self, scale, factor, mode):
self.index = 0
self.scale = scale
if factor is None:
self._log_factor = None
else:
if factor < 1.0:
raise ValueError("'factor' must be >= 1.0")
self._log_factor = np.log(factor)
if mode not in self.allowed_modes:
raise ValueError(("'{0}' is not a recognized mode. "
"Please select from: {1}")
.format(mode, self.allowed_modes))
self.mode = mode
def get_factor(self, rng):
if self._log_factor is None:
return 1.0
return np.exp(rng.uniform(-self._log_factor, self._log_factor))
def get_updated_vector(self, rng, x0):
return x0 + self.get_factor(rng) * self.scale * rng.randn(*(x0.shape))
def __call__(self, rng, x0):
nw, nd = x0.shape
xnew = self.get_updated_vector(rng, x0)
if self.mode == "random":
m = (range(nw), rng.randint(x0.shape[-1], size=nw))
elif self.mode == "sequential":
m = (range(nw), self.index % nd + np.zeros(nw, dtype=int))
self.index = (self.index + 1) % nd
else:
return xnew, np.zeros(nw)
x = np.array(x0)
x[m] = xnew[m]
return x, np.zeros(nw)
class _diagonal_proposal(_isotropic_proposal):
def get_updated_vector(self, rng, x0):
return x0 + self.get_factor(rng) * self.scale * rng.randn(*(x0.shape))
class _proposal(_isotropic_proposal):
allowed_modes = ["vector"]
def get_updated_vector(self, rng, x0):
return x0 + self.get_factor(rng) * rng.multivariate_normal(
np.zeros(len(self.scale)), self.scale)
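# A minimal usage sketch: the proposal class is selected purely by the shape
# of ``cov``. A scalar gives an isotropic step, a 1-D array an axis-aligned
# (diagonal) step, and a square matrix a general multivariate step. The
# ``emcee3.moves`` import path is assumed from this module's location.
#
#     import numpy as np
#     from emcee3.moves import GaussianMove
#
#     move = GaussianMove(0.5)                        # isotropic, sigma = sqrt(0.5)
#     move = GaussianMove(np.ones(3), mode="random")  # diagonal, one dimension per step
#     move = GaussianMove(np.eye(3))                  # full covariance, "vector" mode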
|
dfm/emcee3
|
emcee3/moves/gaussian.py
|
Python
|
mit
| 4,020
|
[
"Gaussian"
] |
e3ef1c8b799d4bd82a5b5e89f5ce375b1ade3526a825b7a9e0bddd42c474923a
|
# -*- coding: utf-8 -*-
"""
Polycrystalline diffraction pattern simulation
==============================================
"""
import numpy as np
from crystals.affine import change_basis_mesh
from ..voigt import pseudo_voigt
from .structure_factors import structure_factor
def powdersim(crystal, q, fwhm_g=0.03, fwhm_l=0.06, **kwargs):
"""
Simulates polycrystalline diffraction pattern.
Parameters
----------
crystal : `skued.structure.Crystal`
Crystal from which to diffract.
q : `~numpy.ndarray`, shape (N,)
Range of scattering vector norm over which to compute the diffraction pattern [1/Angs].
fwhm_g, fwhm_l : float, optional
Full-width at half-max of the Gaussian and Lorentzian parts of the Voigt profile.
See `skued.pseudo_voigt` for more details.
Returns
-------
pattern : `~numpy.ndarray`, shape (N,)
Diffraction pattern
"""
refls = np.vstack(tuple(crystal.bounded_reflections(q.max())))
h, k, l = np.hsplit(refls, 3)
Gx, Gy, Gz = change_basis_mesh(
h, k, l, basis1=crystal.reciprocal_vectors, basis2=np.eye(3)
)
qs = np.sqrt(Gx ** 2 + Gy ** 2 + Gz ** 2)
intensities = np.absolute(structure_factor(crystal, h, k, l)) ** 2
pattern = np.zeros_like(q)
for qi, i in zip(qs, intensities):
pattern += i * pseudo_voigt(q, qi, fwhm_g, fwhm_l)
return pattern
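# A minimal usage sketch, assuming the installed ``crystals`` package exposes
# ``Crystal.from_database`` (the package provides ``change_basis_mesh`` above,
# so this is plausible but unverified here):
#
#     import numpy as np
#     from crystals import Crystal
#
#     crystal = Crystal.from_database("C")   # diamond, from the built-in CIF files
#     q = np.linspace(1, 10, 1024)           # scattering vector norms [1/Angs]
#     pattern = powdersim(crystal, q)
#     pattern /= pattern.max()               # normalize before plotting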
|
LaurentRDC/scikit-ued
|
skued/simulation/powdersim.py
|
Python
|
gpl-3.0
| 1,403
|
[
"CRYSTAL",
"Gaussian"
] |
aad3e2c0f82ac24f7d80821ccac33a89003ec8d564753f991fd0b81f25b6ed7f
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# (C) Copyright Apple Inc. 2011
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jeremy Huddleston <jeremyhu@apple.com>
#
# Based on code originally by:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
header = """/* GLXEXT is the define used in the xserver when the GLX extension is being
* built. Hijack this to determine whether this file is being built for the
* server or the client.
*/
#ifdef HAVE_DIX_CONFIG_H
#include <dix-config.h>
#endif
#if (defined(GLXEXT) && defined(HAVE_BACKTRACE)) \\
|| (!defined(GLXEXT) && defined(DEBUG) && !defined(_WIN32_WCE) && !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__))
#define USE_BACKTRACE
#endif
#ifdef USE_BACKTRACE
#include <execinfo.h>
#endif
#ifndef _WIN32
#include <dlfcn.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include "main/glheader.h"
#include "glapi.h"
#include "glapitable.h"
#ifdef GLXEXT
#include "os.h"
#endif
static void
__glapi_gentable_NoOp(void) {
const char *fstr = "Unknown";
/* Silence potential GCC warning for some #ifdef paths.
*/
(void) fstr;
#if defined(USE_BACKTRACE)
#if !defined(GLXEXT)
if (getenv("MESA_DEBUG") || getenv("LIBGL_DEBUG"))
#endif
{
void *frames[2];
if(backtrace(frames, 2) == 2) {
Dl_info info;
dladdr(frames[1], &info);
if(info.dli_sname)
fstr = info.dli_sname;
}
#if !defined(GLXEXT)
fprintf(stderr, "Call to unimplemented API: %s\\n", fstr);
#endif
}
#endif
#if defined(GLXEXT)
LogMessage(X_ERROR, "GLX: Call to unimplemented API: %s\\n", fstr);
#endif
}
static void
__glapi_gentable_set_remaining_noop(struct _glapi_table *disp) {
GLuint entries = _glapi_get_dispatch_table_size();
void **dispatch = (void **) disp;
int i;
/* ISO C is annoying sometimes */
union {_glapi_proc p; void *v;} p;
p.p = __glapi_gentable_NoOp;
for(i=0; i < entries; i++)
if(dispatch[i] == NULL)
dispatch[i] = p.v;
}
struct _glapi_table *
_glapi_create_table_from_handle(void *handle, const char *symbol_prefix) {
struct _glapi_table *disp = calloc(1, _glapi_get_dispatch_table_size() * sizeof(_glapi_proc));
char symboln[512];
if(!disp)
return NULL;
if(symbol_prefix == NULL)
symbol_prefix = "";
"""
footer = """
__glapi_gentable_set_remaining_noop(disp);
return disp;
}
"""
body_template = """
if(!disp->%(name)s) {
void ** procp = (void **) &disp->%(name)s;
snprintf(symboln, sizeof(symboln), "%%s%(entry_point)s", symbol_prefix);
#ifdef _WIN32
*procp = GetProcAddress(handle, symboln);
#else
*procp = dlsym(handle, symboln);
#endif
}
"""
class PrintCode(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_gen_table.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2005
(C) Copyright Apple Inc 2011""", "BRIAN PAUL, IBM")
return
def get_stack_size(self, f):
size = 0
for p in f.parameterIterator():
if p.is_padding:
continue
size += p.get_stack_size()
return size
def printRealHeader(self):
print header
return
def printRealFooter(self):
print footer
return
def printBody(self, api):
for f in api.functionIterateByOffset():
for entry_point in f.entry_points:
vars = { 'entry_point' : entry_point,
'name' : f.name }
print body_template % vars
return
def show_usage():
print "Usage: %s [-f input_file_name]" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
except Exception,e:
show_usage()
for (arg,val) in args:
if arg == "-f":
file_name = val
printer = PrintCode()
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer.Print(api)
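# A minimal usage note: the script writes the generated C source to stdout, so
# a typical (hypothetical) build invocation redirects it to a file, e.g.:
#
#     python gl_gentable.py -f gl_API.xml > glapi_gentable.c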
|
execunix/vinos
|
xsrc/external/mit/MesaLib/dist/src/mapi/glapi/gen/gl_gentable.py
|
Python
|
apache-2.0
| 5,438
|
[
"Brian"
] |
a22b45804560502e7df353707bc414c41e1471ff83e80a450efd228d94eb073f
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pyfits
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.integrate import quad
"""Script used to load SMF from Davizon+17 paper with uncertainties and marke
abundance matching with it
"""
redshifts = np.array([0.2, 0.5, 0.8, 1.1, 1.5, 2, 2.5, 3, 3.5, 4.5, 5.5])
numzbin = np.size(redshifts)-1
"""Load the SMF"""
smf = []
for i in range(10):
# smf.append(np.loadtxt('../Data/Davidzon/Davidzon+17_SMF_V3.0/mf_mass2b_fl5b_tot_VmaxFit2D'+str(i)+'.dat'))
smf.append(np.loadtxt('../Data/Davidzon/schechter_fixedMs/mf_mass2b_fl5b_tot_VmaxFit2E'
+ str(i) + '.dat'))
"""Plot"""
plt.figure()
for i in range(10):
plt.fill_between(smf[i][:,0], smf[i][:,2], smf[i][:,3], alpha=0.5,
label=str(redshifts[i])+'<z<'+str(redshifts[i+1]))
plt.ylim(-6,-2)
plt.xlim(9,12)
plt.title('Davidzon+17 Schechter fits')
plt.ylabel('Log($\phi$) [Log($Mpc^{-3}$)]')
plt.xlabel('Log($M_{*}$) [Log($M_{\odot}$)]')
plt.legend(loc=3)
plt.show()
"""Compute Galaxy Cumulative Density
"""
# Compute integrals to have the cumulative density = int(phi(m)dm)
numpoints = np.size(smf[0][:,0])
Nstar = np.empty([numzbin, numpoints])
Nstarminus = np.empty([numzbin, numpoints])
Nstarplus = np.empty([numzbin, numpoints])
for i in range(numzbin):
for j in range(numpoints):
Nstar[i,j]= np.trapz(10**smf[i][j:,1], smf[i][j:,0])
Nstarminus[i,j] = np.trapz(10**smf[i][j:,2], smf[i][j:,0])
Nstarplus[i,j] = np.trapz(10**smf[i][j:,3], smf[i][j:,0])
"""Plot"""
plt.figure()
for i in range(10):
plt.fill_between(smf[i][:,0], Nstarminus[i,:], Nstarplus[i,:], alpha=0.5,
label=str(redshifts[i])+'<z<'+str(redshifts[i+1]))
## To compare with my own density computations on JeanCoupon catalog
## plt.scatter(np.linspace(mmingal, mmaxgal, num=n),Ngal[i], marker='+', color='red')
plt.yscale('log')
plt.ylim(10**-6, 0.1)
plt.xlim(8, 12)
plt.ylabel('N(>$M_{*}$), [$Mpc^{-3}$]')
plt.xlabel('Log(Stellar Mass), [Log($M_{\odot}$)]')
plt.legend(loc=3)
plt.show()
"""Load Density of DM halos from Bolshoï simulation"""
Nhalo = np.load('Nhalo.npy')
MvirNhalo = np.load('MvirNhalo.npy')
"""Interpolate
"""
MstarIary = []
MstarIaryPlus = []
MstarIaryMinus = []
Mhalo = []
for i in range(numzbin):
"""do the interpolation for each redshift bin, in order to have the functions
StellarMass(abundane) and HaloMass(abundance)"""
MstarIary.append(interp1d(Nstar[i,:], 10**smf[i][:,0]))
MstarIaryMinus.append(interp1d(Nstarminus[i,:], 10**smf[i][:,0]))
MstarIaryPlus.append(interp1d(Nstarplus[i,:], 10**smf[i][:,0]))
Mhalo.append(interp1d(Nhalo[i][:], MvirNhalo))
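# Abundance matching pairs masses at equal cumulative number density:
# N_*(>M_*) = N_h(>M_h), i.e. M_*(M_h) = N_*^{-1}(N_h(M_h)). The x array
# built below samples the abundance range over which both interpolants
# are defined.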
"""Compute M*/Mh with uncertainties"""
n_fit=1000
x = np.empty([numzbin, n_fit])
xm =np.empty([numzbin, n_fit])
ym =np.empty([numzbin, n_fit])
yminus =np.empty([numzbin, n_fit])
yplus =np.empty([numzbin, n_fit])
for i in range(numzbin):
print(i)
x[i] = np.geomspace(max(min(Nstar[i, Nstar[i,:]>0]),
Nstarminus[i,-1], Nstarplus[i,-1], Nhalo[i, -1]),
min(Nstar[i, 0], Nstarminus[i,0], Nstarplus[i,0], Nhalo[i, 0]), 1000)
x[i][0] = max(min(Nstar[i, Nstar[i,:]>0]), Nstarminus[i,-1], Nstarplus[i,-1], Nhalo[i, -1])
x[i][-1] = min(Nstar[i, 0], Nstarminus[i,0], Nstarplus[i,0], Nhalo[i, 0])
xm[i] = Mhalo[i](x[i])
ym[i] = MstarIary[i](x[i])/Mhalo[i](x[i])
yminus[i] = MstarIaryMinus[i](x[i])/Mhalo[i](x[i])
yplus[i] = MstarIaryPlus[i](x[i])/Mhalo[i](x[i])
"""Plot"""
index_min = np.empty(numzbin).astype('int')
for i in range(numzbin):
index_min[i] = np.argmin(ym[i, :950])
for i in range(numzbin):
    plt.figure()
    # index_min = np.argmin(Nhalo[i] > 0)
    plt.plot(xm[i][index_min[i]:], ym[i][index_min[i]:],
             label=str(redshifts[i]) + '<z<' + str(redshifts[i+1]))
    plt.fill_between(xm[i], yminus[i], yplus[i], alpha=0.5)
    # NB: Mpeak, Mpeak_idx, Mpeakmin and Mpeakmax are computed in the
    # "Compute Mpeak and uncertainties" section below; run that section
    # first when executing this script interactively.
    plt.scatter(Mpeak[i], ym[i][Mpeak_idx[i]])
    plt.plot((Mpeakmin[i], Mpeakmax[i]), (ym[i][Mpeak_idx[i]], ym[i][Mpeak_idx[i]]))
plt.legend()
plt.xlim(2.8*10**9,10**15)
plt.ylim(0.9*10**-3, 0.11)
plt.ylabel('$M_{*}/M_{h}$', size=20)
plt.xlabel('$M_{h}$ [$M_{\odot}]$', size=20)
plt.xscale('log');plt.yscale('log')
plt.tight_layout()
# plt.title('IariDavidzon Mass Function vs Bolshoï simulation')
plt.show()
"""Compute Mpeak and uncertainties"""
# Mpeak correspond to the Mh with the highest M*/Mh
# Need to restrict the interval in which to look for the peak
# -> using the index 640 empirically is not very clean; a better method should be found
Mpeak = np.empty(numzbin)
Mpeakmin = np.empty(numzbin)
Mpeakmax = np.empty(numzbin)
Mpeak_idx = np.empty(numzbin, dtype=int)
for i in range(numzbin):
    Mpeak_idx[i] = np.argmax(ym[i][650:]) + 650
    Mpeak[i] = xm[i][Mpeak_idx[i]]
    Mpeakmax[i] = xm[i][np.argmin(np.abs(yplus[i][640:Mpeak_idx[i]] - ym[i][Mpeak_idx[i]])) + 640]
    Mpeakmin[i] = xm[i][np.argmin(np.abs(yplus[i][Mpeak_idx[i]:] - ym[i][Mpeak_idx[i]])) + Mpeak_idx[i]]
# for i in range(numzbin):
# plt.scatter(Mpeak[i], ym[i][Mpeak_idx[i]])
# plt.plot((Mpeakmin[i], Mpeakmax[i]), (ym[i][Mpeak_idx[i]], ym[i][Mpeak_idx[i]] ))
#
# for i in range(numzbin):
# plt.plot(ym[i], label=str(redshifts[i])+'<z<'+str(redshifts[i+1]))
# plt.scatter(Mpeak_idx[i], ym[i][Mpeak_idx[i]])
# plt.scatter(np.argmin(np.abs((yplus[i][640:Mpeak_idx[i]]-ym[i][Mpeak_idx[i]])))+640, ym[i][np.argmin(np.abs((yplus[i][600:Mpeak_idx[i]]-ym[i][Mpeak_idx[i]])))+600])
# plt.yscale('log')
#
# plt.scatter(Mpeak[i], ym[i][Mpeak_idx[i]])
# plt.plot((Mpeakmin[i], Mpeakmax[i]), (ym[i][Mpeak_idx[i]], ym[i][Mpeak_idx[i]] ))
plt.errorbar((redshifts[1:]+redshifts[:-1])/2, Mpeak[:],
yerr=[Mpeak[:]-Mpeakmin[:], Mpeakmax[:]-Mpeak[:]],
xerr=((redshifts[1:]+redshifts[:-1])/2-redshifts[:-1],
redshifts[1:] -(redshifts[1:]+redshifts[:-1])/2),
fmt='o', capsize=2)
plt.yscale('log')
plt.xlabel('Redshift', fontsize=15)
plt.ylabel('$M_{peak}$, [$M_{\odot}$]', fontsize=15)
|
Gorbagzog/StageIAP
|
IaryDavidzonSMF.py
|
Python
|
gpl-3.0
| 6,050
|
[
"Galaxy"
] |
6a7b373dc663e2d56c0db16cf15f7ad8a018fffa740e39d9f0cf651c06997223
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Parsing and analyzing Python source code
Desc :
"""
import ast
class CodeAnalyzer(ast.NodeVisitor):
def __init__(self):
self.loaded = set()
self.stored = set()
self.deleted = set()
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
self.loaded.add(node.id)
elif isinstance(node.ctx, ast.Store):
self.stored.add(node.id)
elif isinstance(node.ctx, ast.Del):
self.deleted.add(node.id)
# Sample usage
if __name__ == '__main__':
# Some Python code
code = '''
for i in range(10):
print(i)
del i
'''
# Parse into an AST
top = ast.parse(code, mode='exec')
# Feed the AST to analyze name usage
c = CodeAnalyzer()
c.visit(top)
print('Loaded:', c.loaded)
print('Stored:', c.stored)
print('Deleted:', c.deleted)
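    # Expected output of the sample above (set ordering may vary):
    #     Loaded: {'i', 'range', 'print'}
    #     Stored: {'i'}
    #     Deleted: {'i'}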
# namelower.py
import ast
import inspect
# Node visitor that lowers globally accessed names into
# the function body as local variables.
class NameLower(ast.NodeVisitor):
def __init__(self, lowered_names):
self.lowered_names = lowered_names
def visit_FunctionDef(self, node):
# Compile some assignments to lower the constants
code = '__globals = globals()\n'
code += '\n'.join("{0} = __globals['{0}']".format(name)
for name in self.lowered_names)
code_ast = ast.parse(code, mode='exec')
# Inject new statements into the function body
node.body[:0] = code_ast.body
# Save the function object
self.func = node
# Decorator that turns global names into locals
def lower_names(*namelist):
def lower(func):
srclines = inspect.getsource(func).splitlines()
# Skip source lines prior to the @lower_names decorator
for n, line in enumerate(srclines):
if '@lower_names' in line:
break
src = '\n'.join(srclines[n + 1:])
# Hack to deal with indented code
if src.startswith((' ', '\t')):
src = 'if 1:\n' + src
top = ast.parse(src, mode='exec')
# Transform the AST
cl = NameLower(namelist)
cl.visit(top)
# Execute the modified AST
temp = {}
exec(compile(top, '', 'exec'), temp, temp)
# Pull out the modified code object
func.__code__ = temp[func.__name__].__code__
return func
return lower
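# A minimal usage sketch in the spirit of the recipe: the named globals are
# re-bound as locals at the top of the rewritten function body, so each
# lookup inside the loop becomes a fast local access.
#
#     INCR = 1
#
#     @lower_names('INCR')
#     def countdown(n):
#         while n > 0:
#             n -= INCR
#
#     countdown(1000000)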
|
tongpo/Holle-World
|
py/python3-cookbook/cookbook/c09/p24_analyze_source.py
|
Python
|
gpl-2.0
| 2,488
|
[
"VisIt"
] |
65871c9d4f6ac89e8e2299c2801bc591c4e194697d122cf8b6f374cf1a487ed2
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for Bio.Nexus trees."""
__docformat__ = "epytext en"
from itertools import chain
from Bio.Nexus import Nexus
from Bio.Phylo import Newick, NewickIO
# Structure of a Nexus tree-only file
NEX_TEMPLATE = """\
#NEXUS
Begin Taxa;
Dimensions NTax=%(count)d;
TaxLabels %(labels)s;
End;
Begin Trees;
%(trees)s
End;
"""
# 'index' starts from 1; 'tree' is the Newick tree string
TREE_TEMPLATE = "Tree tree%(index)d=[&U]%(tree)s;"
def parse(handle):
"""Parse the trees in a Nexus file.
Uses the old Nexus.Trees parser to extract the trees, converts them back to
plain Newick trees, and feeds those strings through the new Newick parser.
This way we don't have to modify the Nexus module yet. When we're satisfied
with Bio.Phylo, we can change Nexus to use the new NewickIO parser directly.
"""
nex = Nexus.Nexus(handle)
# NB: Once Nexus.Trees is modified to use Tree.Newick objects, do this:
# return iter(nex.trees)
# Until then, convert the Nexus.Trees.Tree object hierarchy:
def node2clade(nxtree, node):
subclades = [node2clade(nxtree, nxtree.node(n)) for n in node.succ]
return Newick.Clade(
branch_length=node.data.branchlength,
name=node.data.taxon,
clades=subclades,
confidence=node.data.support,
comment=node.data.comment)
for nxtree in nex.trees:
newroot = node2clade(nxtree, nxtree.node(nxtree.root))
yield Newick.Tree(root=newroot, rooted=nxtree.rooted, name=nxtree.name,
weight=nxtree.weight)
def write(obj, handle, **kwargs):
"""Write a new Nexus file containing the given trees.
Uses a simple Nexus template and the NewickIO writer to serialize just the
trees and minimal supporting info needed for a valid Nexus file.
"""
trees = list(obj)
writer = NewickIO.Writer(trees)
nexus_trees = [TREE_TEMPLATE % {'index': idx+1, 'tree': nwk}
for idx, nwk in enumerate(
writer.to_strings(plain=False, plain_newick=True,
**kwargs))]
    tax_labels = [str(tax) for tax in chain(*(t.get_terminals() for t in trees))]  # a list, so len() below also works on Python 3
text = NEX_TEMPLATE % {
'count': len(tax_labels),
'labels': ' '.join(tax_labels),
'trees': '\n'.join(nexus_trees),
}
handle.write(text)
return len(nexus_trees)
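# A minimal usage sketch, with hypothetical file names:
#
#     with open("trees.nex") as in_handle:
#         trees = list(parse(in_handle))
#     with open("out.nex", "w") as out_handle:
#         write(trees, out_handle)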
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Phylo/NexusIO.py
|
Python
|
gpl-2.0
| 2,695
|
[
"Biopython"
] |
b0fb7c17574dbd2a87b13efdf51454fdedf80e0e2285132b170b7b450f2c0011
|
# -*- coding: utf-8 -*-
from gettext import gettext as _
LEVEL1 = [
1,
_('Countries'),
['lineasDepto'],
[],
[
(_('United States'), 1, _('United States'), _('Is in the center')),
(_('Canada'), 1, _('Canada'), _('Is north')),
(_('Alaska'), 1, _('Alaska'), _('Is northwest')),
(_('México'), 1, _('México'), _('Is south')),
(_('the %s') % _('Revillagigedo Islands'), 1, _('Revillagigedo Islands'), _('Is southwest')),
(_('the %s') % _('Baffin Island'), 1, _('Baffin Island'), _('Is northeast')),
(_('the %s') % _('Victoria Island'), 1, _('Victoria Island'), _('Is north')),
(_('the %s') % _('Banks Island'), 1, _('Banks Island'), _('Is north')),
(_('the %s') % _('Ellesmere Island'), 1, _('Ellesmere Island'), _('Is north')),
(_('the %s') % _('Elizabeth Island'), 1, _('Elizabeth Island'), _('Is north'))
]
]
LEVEL2 = [
2,
_('Cities'),
['lineasDepto', 'capitales', 'ciudades'],
[],
[
(_('México'), _('Is south')),
(_('Ottawa'), _('Is east')),
(_('Washington'), _('Is east')),
(_('Acapulco'), _('Is south')),
(_('Albuquerque'), _('Is in the center')),
(_('Anchorage'), _('Is northwest')),
(_('Atlanta'), _('Is east')),
(_('Barrow'), _('Is northwest')),
(_('Bethel'), _('Is northwest')),
(_('Boston'), _('Is east')),
(_('Calgary'), _('Is in the center')),
(_('Cambridge Bay'), _('Is north')),
(_('Cancún'), _('Is southeast')),
(_('Charleston'), _('Is east')),
(_('Charlottetown'), _('Is northeast')),
(_('Chicago'), _('Is in the center')),
(_('Chihuahua'), _('Is south')),
(_('Churchill'), _('Is north')),
(_('Cleveland'), _('Is east')),
(_('Columbus'), _('Is east')),
(_('Dallas'), _('Is in the center')),
(_('Dawson'), _('Is northwest')),
(_('Denver'), _('Is in the center')),
(_('Detroit'), _('Is east')),
(_('Echo Bay'), _('Is north')),
(_('Edmonton'), _('Is north')),
(_('El Paso'), _('Is south')),
(_('Fairbanks'), _('Is northwest')),
(_('Fort George'), _('Is northeast')),
(_('Fredericton'), _('Is east')),
(_('Frobisher Bay'), _('Is northeast')),
(_('Goose Bay'), _('Is northeast')),
(_('Guadalajara'), _('Is south')),
(_('Halifax'), _('Is east')),
(_('Hay River'), _('Is north')),
(_('Hermosillo'), _('Is southwest')),
(_('Houston'), _('Is southeast')),
(_('Indianapolis'), _('Is east')),
(_('Inuvik'), _('Is northwest')),
(_('Ivujivik'), _('Is northeast')),
(_('Jacksonville'), _('Is southeast')),
(_('Kansas City'), _('Is in the center')),
(_('Kaujuitoq'), _('Is north')),
(_('Kodiak'), _('Is northwest')),
(_('La Paz'), _('Is southwest')),
(_('Las Vegas'), _('Is west')),
(_('Los Angeles'), _('Is west')),
(_('Mérida'), _('Is southeast')),
(_('Matamoros'), _('Is southeast')),
(_('Mazatlán'), _('Is south')),
(_('Memphis'), _('Is in the center')),
(_('Miami'), _('Is southeast')),
(_('Minneapolis'), _('Is in the center')),
(_('Monterrey'), _('Is south')),
(_('Montréal'), _('Is east')),
(_('Moosonee'), _('Is northeast')),
(_('New Orleans'), _('Is southeast')),
(_('New York'), _('Is east')),
(_('Nome'), _('Is northwest')),
(_('Norfolk'), _('Is east')),
(_('Oaxaca'), _('Is south')),
(_('Oklahoma City'), _('Is in the center')),
(_('Philadelphia'), _('Is east')),
(_('Phoenix'), _('Is southwest')),
(_('Portland'), _('Is west')),
(_('Prince George'), _('Is northwest')),
(_('Prince Rupert'), _('Is northwest')),
(_('Prudhoe Bay'), _('Is northwest')),
(_('Puebla'), _('Is south')),
(_('Québec'), _('Is east')),
(_('Regina'), _('Is in the center')),
(_('Repulse Bay'), _('Is north')),
(_('Sacramento'), _('Is west')),
(_("Saint John's"), _('Is northeast')),
(_('Saint John'), _('Is east')),
(_('Salt Lake City'), _('Is in the center')),
(_('San Antonio'), _('Is south')),
(_('San Diego'), _('Is west')),
(_('San Francisco'), _('Is west')),
(_('Saskatoon'), _('Is north')),
(_('Schefferville'), _('Is northeast')),
(_('Seattle'), _('Is west')),
(_('St. Louis'), _('Is in the center')),
(_('Sydney'), _('Is northeast')),
(_('Tampico'), _('Is south')),
(_('Thunder Bay'), _('Is in the center')),
(_('Toronto'), _('Is east')),
(_('Torreón'), _('Is north')),
(_('Valdez'), _('Is northwest')),
(_('Vancouver'), _('Is west')),
(_('Veracruz'), _('Is south')),
(_('Watson Lake'), _('Is northwest')),
(_('Whitehorse'), _('Is northwest')),
(_('Winnipeg'), _('Is in the center')),
(_('Yellowknife'), _('Is north'))
]
]
LEVEL3 = [
4,
_('Waterways'),
['rios'],
[],
[
(_('Grande River'), _('Is south')),
(_('Colorado River'), _('Is southwest')),
(_('Arkansas River'), _('Is in the center')),
(_('Missouri River'), _('Is in the center')),
(_('Mississippi River'), _('Is southeast')),
(_('Ohio River'), _('Is east')),
(_('Snake River'), _('Is west')),
(_('Columbia River'), _('Is west')),
(_('Saskatchewan River'), _('Is north')),
(_('Nelson River'), _('Is north')),
(_('Yukon River'), _('Is northwest')),
(_('Mackenzie River'), _('Is northwest')),
(_('Peace River'), _('Is northwest')),
(_('Saint Lawrence River'), _('Is east')),
(_('Hudson Bay'), _('Is north')),
(_('Baffin Bay'), _('Is north')),
(_('Strait of Davis'), _('Is northeast')),
(_('Labrador Sea'), _('Is northeast')),
(_('Beaufort Sea'), _('Is northwest')),
        (_('Chukchi Sea'), _('Is northwest')),
(_('Arctic Ocean'), _('Is north')),
(_('Bering Sea'), _('Is northwest')),
(_('Gulf of Alaska'), _('Is northwest')),
(_('Gulf of California'), _('Is southwest')),
(_('Campeche Bay'), _('Is south')),
(_('Gulf of Mexico'), _('Is southeast')),
(_('Caribbean Sea'), _('Is southeast')),
(_('Atlantic Ocean'), _('Is east')),
(_('St. Lawrence Gulf'), _('Is northeast')),
(_('Pacific Ocean'), _('Is west')),
(_('Lake Winnipeg'), _('Is north')),
(_('Lake Superior'), _('Is in the center')),
(_('Lake Michigan'), _('Is in the center')),
(_('Lake Huron'), _('Is in the center')),
(_('Norwegian Sea'), _('Is northeast'))
]
]
LEVELS = [LEVEL1, LEVEL2, LEVEL3]
|
AlanJAS/iknowAmerica
|
recursos/0adelnorte/datos/levels.py
|
Python
|
gpl-3.0
| 6,367
|
[
"COLUMBUS"
] |
49bd571cfd74f0083f197252b5f9b535100a76952e9261befdf62a4668f1adc0
|
# -*- coding: utf-8 -*-
"""
toolbox.py
Create Oficina Toolbar in Sugar
Copyright 2007, NATE-LSI-EPUSP
Oficina is developed in Brazil at Escola Politécnica of
Universidade de São Paulo. NATE is part of LSI (Integrable
Systems Laboratory) and stands for Learning, Work and Entertainment
Research Group. Visit our web page:
www.lsi.usp.br/nate
Suggestions, bugs and doubts, please email oficina@lsi.usp.br
Oficina is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation version 2 of
the License.
Oficina is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with Oficina; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301 USA.
The copy of the GNU General Public License is found in the
COPYING file included in the source distribution.
Authors:
Joyce Alessandra Saul (joycealess@gmail.com)
Andre Mossinato (andremossinato@gmail.com)
Nathalia Sautchuk Patrício (nathalia.sautchuk@gmail.com)
Pedro Kayatt (pekayatt@gmail.com)
Rafael Barbolo Lopes (barbolo@gmail.com)
Alexandre A. Gonçalves Martinazzo (alexandremartinazzo@gmail.com)
Collaborators:
Bruno Gola (brunogola@gmail.com)
Group Manager:
Irene Karaguilla Ficheman (irene@lsi.usp.br)
Scientific Coordinator:
Roseli de Deus Lopes (roseli@lsi.usp.br)
UI Design (OLPC):
Eben Eliason (eben@laptop.org)
Project Coordinator (OLPC):
Manusheel Gupta (manu@laptop.org)
Project Advisor (OLPC):
Walter Bender (walter@laptop.org)
"""
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
import logging
from sugar3.activity.widgets import EditToolbar
from sugar3.graphics.toolcombobox import ToolComboBox
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.toggletoolbutton import ToggleToolButton
from sugar3.graphics.objectchooser import ObjectChooser
try:
from sugar3.graphics.objectchooser import FILTER_TYPE_GENERIC_MIME
except ImportError:
FILTER_TYPE_GENERIC_MIME = 'generic_mime'
from widgets import ButtonStrokeColor
from sugar3.graphics.colorbutton import ColorToolButton
from sugar3.graphics.radiopalette import RadioPalette
from sugar3.graphics.palettemenu import PaletteMenuBox
from sugar3.graphics.palettemenu import PaletteMenuItem
from sugar3.graphics import style
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.graphics.toolbarbox import ToolbarButton, ToolbarBox
from sugar3.activity.widgets import StopButton
from fontcombobox import FontComboBox
from fontcombobox import FontSize
from dialogs import TuxStampDialog
def add_menu(icon_name, tooltip, tool_name, button, activate_cb):
menu_item = PaletteMenuItem(icon_name=icon_name, text_label=tooltip)
menu_item.connect('activate', activate_cb, tool_name)
menu_item.icon_name = icon_name
button.menu_box.append_item(menu_item)
menu_item.show()
return menu_item
class DrawToolbarBox(ToolbarBox):
"""Create toolbars for the activity"""
# dictionary - tool name : tool icon name
tool_icon_name = {'ellipse': 'tool-shape-ellipse',
'rectangle': 'tool-shape-rectangle',
'line': 'tool-shape-line',
'freeform': 'tool-shape-freeform',
'heart': 'tool-shape-heart',
'parallelogram': 'tool-shape-parallelogram',
'arrow': 'tool-shape-arrow',
'star': 'tool-shape-star',
'trapezoid': 'tool-shape-trapezoid',
'triangle': 'tool-shape-triangle',
'polygon_regular': 'tool-shape-polygon',
'brush': 'tool-brush',
'eraser': 'tool-eraser',
'bucket': 'tool-bucket',
                      'picker': 'tool-picker'}
def __init__(self, activity):
self._activity = activity
ToolbarBox.__init__(self)
activity_button = ActivityToolbarButton(self._activity)
self.toolbar.insert(activity_button, -1)
self._activity.set_toolbar_box(self)
edit_toolbar = ToolbarButton()
edit_toolbar.props.page = DrawEditToolbar(self._activity)
edit_toolbar.props.icon_name = 'toolbar-edit'
edit_toolbar.props.label = _('Edit')
self.toolbar.insert(edit_toolbar, -1)
self._fill_color_button = ButtonFillColor(activity)
self._fill_color_button.set_title(_('Shapes properties'))
item_fill_color = Gtk.ToolItem()
item_fill_color.add(self._fill_color_button)
self._fill_color_button.set_sensitive(False)
self._activity.tool_group = None
self.tools_builder = ToolsToolbarBuilder(self.toolbar, self._activity,
self._fill_color_button)
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.toolbar.insert(separator, -1)
self.shapes_button = DrawToolButton('shapes',
self._activity.tool_group,
_('Shapes'))
self.toolbar.insert(self.shapes_button, -1)
self.shapes_builder = ShapesToolbarBuilder(self._activity,
self.shapes_button,
self._fill_color_button)
self.initialize_brush_shape_tools()
self.toolbar.insert(item_fill_color, -1)
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.toolbar.insert(separator, -1)
fonts_button = ToolbarButton()
fonts_button.props.page = TextToolbar(self._activity)
fonts_button.props.icon_name = 'format-text-size'
fonts_button.props.label = _('Fonts')
self.toolbar.insert(fonts_button, -1)
image_button = ToolbarButton()
image_button.props.page = ImageToolbar(self._activity)
image_button.props.icon_name = 'picture'
image_button.props.label = _('Image')
self.toolbar.insert(image_button, -1)
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_size_request(0, -1)
separator.set_expand(True)
self.toolbar.insert(separator, -1)
separator.show()
stop = StopButton(self._activity)
self.toolbar.insert(stop, -1)
        # TODO: workaround
        # the BrushButton does not start properly
self.brush_button = self.tools_builder._stroke_color.color_button
area = self._activity.area
self.brush_button.set_brush_shape(area.tool['line shape'])
self.brush_button.set_brush_size(area.tool['line size'])
self.brush_button.set_stamp_size(area.tool['stamp size'])
# init the color
cairo_stroke_color = area.tool['cairo_stroke_color']
red = cairo_stroke_color[0] * 65535
green = cairo_stroke_color[1] * 65535
blue = cairo_stroke_color[2] * 65535
stroke_color = Gdk.Color(red, green, blue)
self.brush_button.set_color(stroke_color)
def initialize_brush_shape_tools(self):
tool_name = self._activity.area.tool['name']
if tool_name in ('brush', 'eraser', 'bucket', 'picker'):
# make the brush tool group
self.tools_builder._tool_brush.set_active(True)
# set the icon
self.tools_builder._tool_brush.set_icon_name(
self.tool_icon_name[tool_name])
self.tools_builder.properties['name'] = tool_name
self._fill_color_button.set_sensitive(False)
elif tool_name in ('ellipse', 'rectangle', 'line', 'freeform', 'heart',
'parallelogram', 'arrow', 'star', 'trapezoid',
'triangle', 'polygon_regular'):
# need to make the shapes tool group active
self.shapes_button.set_active(True)
# set the icon
self.shapes_builder._tool_button.set_icon_name(
self.tool_icon_name[tool_name])
self.shapes_builder._tool_name = tool_name
self.shapes_builder.properties['name'] = tool_name
self._fill_color_button.set_sensitive(True)
# setting the fill color
cairo_fill_color = self._activity.area.tool['cairo_fill_color']
red = cairo_fill_color[0] * 65535
green = cairo_fill_color[1] * 65535
blue = cairo_fill_color[2] * 65535
self._fill_color_button.color = Gdk.Color(red, green, blue)
# Make the Edit Toolbar
class DrawEditToolbar(EditToolbar):
def __init__(self, activity):
EditToolbar.__init__(self)
self._activity = activity
self.undo.set_tooltip(_('Undo'))
self.redo.set_tooltip(_('Redo'))
self.copy.set_tooltip(_('Copy'))
self.paste.set_tooltip(_('Paste'))
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.insert(separator, -1)
self._clear_all = ToolButton('edit-clear')
self.insert(self._clear_all, -1)
self._clear_all.set_tooltip(_('Clear'))
self._clear_all.show()
self._sound = ToggleToolButton('speaker-100')
self._sound.set_tooltip(_('Enable sound'))
if self._activity.area._player is not None:
self.insert(self._sound, -1)
self._sound.show()
self._sound.connect('clicked', self.__sound_cb)
self.undo.connect('clicked', self._undo_cb)
self.redo.connect('clicked', self._redo_cb)
self.copy.connect('clicked', self._copy_cb)
self.paste.connect('clicked', self._paste_cb)
self._clear_all.connect('clicked', self._clear_all_cb)
self._activity.area.connect('undo', self._on_signal_undo_cb)
self._activity.area.connect('redo', self._on_signal_redo_cb)
self._activity.area.connect('select', self._on_signal_select_cb)
self._activity.area.connect('action-saved',
self._on_signal_action_saved_cb)
def _undo_cb(self, widget, data=None):
self._activity.area.undo()
def _redo_cb(self, widget, data=None):
self._activity.area.redo()
def _copy_cb(self, widget, data=None):
self._activity.area.copy()
def _paste_cb(self, widget, data=None):
self._activity.area.paste(self._activity.area)
def _on_signal_undo_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_redo_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_select_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_action_saved_cb(self, widget, data=None):
self._verify_sensitive_buttons()
# define when a button is active
def _verify_sensitive_buttons(self):
self.undo.set_sensitive(self._activity.area.can_undo())
self.redo.set_sensitive(self._activity.area.can_redo())
self.copy.set_sensitive(self._activity.area.is_selected())
# TODO: it is not possible to verify this yet.
# self.paste.set_sensitive(self._activity.area.can_paste())
def _clear_all_cb(self, widget, data=None):
self._activity.area.clear()
def __sound_cb(self, widget):
self._activity.area.enable_sounds(widget.get_active())
if widget.get_active():
self._sound.set_tooltip(_('Disable sound'))
else:
self._sound.set_tooltip(_('Enable sound'))
class DrawToolButton(RadioToolButton):
def __init__(self, icon_name, tool_group, tooltip):
RadioToolButton.__init__(self, icon_name=icon_name)
self.props.group = tool_group
self.set_active(False)
self.set_tooltip(tooltip)
self.selected_button = None
self.palette_invoker.props.toggle_palette = True
self.props.hide_tooltip_on_click = False
if self.props.palette:
self.__palette_cb(None, None)
self.menu_box = PaletteMenuBox()
self.props.palette.set_content(self.menu_box)
self.menu_box.show()
self.connect('notify::palette', self.__palette_cb)
def __palette_cb(self, widget, pspec):
if not isinstance(self.props.palette, RadioPalette):
return
self.props.palette.update_button()
class ToolsToolbarBuilder():
# Tool default definitions
# _TOOL_PENCIL_NAME = 'pencil'
_TOOL_BRUSH_NAME = 'brush'
_TOOL_ERASER_NAME = 'eraser'
_TOOL_BUCKET_NAME = 'bucket'
_TOOL_PICKER_NAME = 'picker'
_TOOL_STAMP_NAME = 'stamp'
_TOOL_MARQUEE_RECT_NAME = 'marquee-rectangular'
def __init__(self, toolbar, activity, fill_color_button):
self._activity = activity
self.properties = self._activity.area.tool
self._fill_color_button = fill_color_button
self._selected_tool_name = self._TOOL_BRUSH_NAME
self._tool_brush = DrawToolButton('tool-brush',
activity.tool_group, _('Brush'))
activity.tool_group = self._tool_brush
toolbar.insert(self._tool_brush, -1)
add_menu('tool-brush', _('Brush'), self._TOOL_BRUSH_NAME,
self._tool_brush, self.set_tool)
add_menu('tool-eraser', _('Eraser'), self._TOOL_ERASER_NAME,
self._tool_brush, self.set_tool)
add_menu('tool-bucket', _('Bucket'), self._TOOL_BUCKET_NAME,
self._tool_brush, self.set_tool)
add_menu('tool-picker', _('Picker'), self._TOOL_PICKER_NAME,
self._tool_brush, self.set_tool)
self._tool_stamp = add_menu('tool-stamp', _('Stamp'),
self._TOOL_STAMP_NAME, self._tool_brush,
self.set_tool)
is_selected = self._activity.area.is_selected()
self._tool_stamp.set_sensitive(is_selected)
add_menu('tool-stamp', _('Load stamp'), 'load-stamp', self._tool_brush,
self.set_tool)
self._activity.area.connect('undo', self._on_signal_undo_cb)
self._activity.area.connect('redo', self._on_signal_redo_cb)
self._activity.area.connect('select', self._on_signal_select_cb)
self._activity.area.connect('action-saved',
self._on_signal_action_saved_cb)
self._tool_marquee_rectangular = add_menu('tool-marquee-rectangular',
_('Select Area'),
self._TOOL_MARQUEE_RECT_NAME,
self._tool_brush,
self.set_tool)
self._tool_brush.connect('clicked', self._tool_button_clicked_cb)
self._stroke_color = ButtonStrokeColor(activity)
self.set_tool(self._tool_brush, self._TOOL_BRUSH_NAME)
self._stroke_color.connect('notify::color', self._color_button_cb)
toolbar.insert(self._stroke_color, -1)
def set_tool(self, widget, tool_name):
"""
Set tool to the Area object. Configures tool's color and size.
@param self -- Gtk.Toolbar
@param widget -- The connected widget, if any;
necessary in case this method is used in a connect()
@param tool_name --The name of the selected tool
"""
if widget != self._tool_brush:
self._tool_brush.set_icon_name(widget.icon_name)
self._stroke_color.set_selected_tool(tool_name)
if tool_name == self._TOOL_STAMP_NAME:
resized_stamp = self._activity.area.setup_stamp()
self._stroke_color.color_button.set_resized_stamp(resized_stamp)
else:
self._stroke_color.color_button.stop_stamping()
if tool_name != self._TOOL_MARQUEE_RECT_NAME:
self._activity.area.end_selection()
if tool_name == 'load-stamp':
dialog = TuxStampDialog(self._activity)
dialog.set_transient_for(self._activity)
dialog.connect('stamp-selected', self._load_stamp)
dialog.show_all()
else:
self._do_setup_tool(tool_name)
def _do_setup_tool(self, tool_name):
self._stroke_color.update_stamping()
self.properties['name'] = tool_name
self._activity.area.set_tool(self.properties)
self._fill_color_button.set_sensitive(False)
self._selected_tool_name = tool_name
def _tool_button_clicked_cb(self, button):
if self._selected_tool_name == 'load-stamp':
# do not open the load stamp dialog when the button is pressed
return
self.set_tool(button, self._selected_tool_name)
def _color_button_cb(self, widget, pspec):
new_color = widget.get_color()
self._activity.area.set_stroke_color(new_color)
def _on_signal_undo_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_redo_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_select_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _on_signal_action_saved_cb(self, widget, data=None):
self._verify_sensitive_buttons()
def _verify_sensitive_buttons(self):
# Check if there is an area selected or if the "stamp" tool is
# being used
sensitive = self._activity.area.is_selected() or \
self.properties['name'] == 'stamp'
self._tool_stamp.set_sensitive(sensitive)
def _load_stamp(self, widget, filepath):
resized_stamp = self._activity.area.setup_stamp(stamp=filepath)
self._stroke_color.color_button.set_resized_stamp(resized_stamp)
self._do_setup_tool('load-stamp')
class ButtonFillColor(ColorToolButton):
"""Class to manage the Fill Color of a Button"""
def __init__(self, activity):
ColorToolButton.__init__(self)
self._activity = activity
self.properties = self._activity.area.tool
self._button_cb = self.connect('notify::color', self._color_button_cb)
self._inactive_color = style.COLOR_INACTIVE_FILL.get_gdk_color()
# self._old_color = self.get_color()
def _color_button_cb(self, widget, pspec):
self._old_color = self.get_color()
self.set_fill_color(self._old_color)
def set_fill_color(self, color):
self._activity.area.set_fill_color(color)
def set_sensitive(self, value):
self.handler_block(self._button_cb)
if value:
self.set_color(self._old_color)
else:
self.set_color(self._inactive_color)
self.handler_unblock(self._button_cb)
ColorToolButton.set_sensitive(self, value)
def create_palette(self):
self._palette = self.get_child().create_palette()
color_palette_hbox = self._palette._picker_hbox
content_box = Gtk.VBox()
# Fill option
fill_checkbutton = Gtk.CheckButton(_('Fill'))
fill_checkbutton.set_active(self.properties['fill'])
fill_checkbutton.connect('toggled',
self._on_fill_checkbutton_toggled)
content_box.pack_start(fill_checkbutton, True, True, 0)
keep_aspect_checkbutton = Gtk.CheckButton(_('Keep aspect'))
logging.debug('Create palette : tool name %s', self.properties['name'])
ratio = self._activity.area.keep_shape_ratio
keep_aspect_checkbutton.set_active(ratio)
keep_aspect_checkbutton.connect(
'toggled', self._on_keep_aspect_checkbutton_toggled)
content_box.pack_start(keep_aspect_checkbutton, True, True, 0)
# We want choose the number of sides to our polygon
spin = Gtk.SpinButton()
# This is where we set restrictions for sides in Regular Polygon:
# Initial value, minimum value, maximum value, step
adj = Gtk.Adjustment(self.properties['vertices'], 3.0, 50.0, 1.0)
spin.set_adjustment(adj)
spin.set_numeric(True)
label = Gtk.Label(label=_('Sides: '))
hbox = Gtk.HBox()
hbox.show_all()
hbox.pack_start(label, True, True, 0)
hbox.pack_start(spin, True, True, 0)
content_box.pack_start(hbox, True, True, 0)
hbox.show_all()
spin.connect('value-changed', self._on_vertices_value_changed)
color_palette_hbox.pack_start(Gtk.VSeparator(), True, True,
padding=style.DEFAULT_SPACING)
color_palette_hbox.pack_start(content_box, True, True,
padding=style.DEFAULT_SPACING)
color_palette_hbox.show_all()
return self._palette
def _on_vertices_value_changed(self, spinbutton):
self.properties['vertices'] = spinbutton.get_value_as_int()
def _on_fill_checkbutton_toggled(self, checkbutton):
logging.debug('Checkbutton is Active: %s', checkbutton.get_active())
self.properties['fill'] = checkbutton.get_active()
def _on_keep_aspect_checkbutton_toggled(self, checkbutton):
self._activity.area.keep_shape_ratio = checkbutton.get_active()
def do_draw(self, cr):
if self._palette and self._palette.is_up():
allocation = self.get_allocation()
# draw a black background, has been done by the engine before
cr.set_source_rgb(0, 0, 0)
cr.rectangle(0, 0, allocation.width, allocation.height)
cr.paint()
Gtk.ToolItem.do_draw(self, cr)
if self._palette and self._palette.is_up():
invoker = self._palette.props.invoker
invoker.draw_rectangle(cr, self._palette)
return False
# Make the Shapes Toolbar
class ShapesToolbarBuilder():
_SHAPE_ARROW_NAME = 'arrow'
_SHAPE_CURVE_NAME = 'curve'
_SHAPE_ELLIPSE_NAME = 'ellipse'
_SHAPE_FREEFORM_NAME = 'freeform'
_SHAPE_HEART_NAME = 'heart'
_SHAPE_LINE_NAME = 'line'
_SHAPE_PARALLELOGRAM_NAME = 'parallelogram'
_SHAPE_POLYGON_NAME = 'polygon_regular'
_SHAPE_RECTANGLE_NAME = 'rectangle'
_SHAPE_STAR_NAME = 'star'
_SHAPE_TRAPEZOID_NAME = 'trapezoid'
_SHAPE_TRIANGLE_NAME = 'triangle'
def __init__(self, activity, button, fill_color_button):
self._activity = activity
self.properties = self._activity.area.tool
self._tool_button = button
self._fill_color_button = fill_color_button
self._tool_name = None
add_menu('tool-shape-ellipse', _('Ellipse'), self._SHAPE_ELLIPSE_NAME,
button, self.set_tool)
add_menu('tool-shape-rectangle', _('Rectangle'),
self._SHAPE_RECTANGLE_NAME, button, self.set_tool)
add_menu('tool-shape-line', _('Line'), self._SHAPE_LINE_NAME, button,
self.set_tool)
add_menu('tool-shape-freeform', _('Free form'),
self._SHAPE_FREEFORM_NAME, button, self.set_tool)
add_menu('tool-shape-polygon', _('Polygon'), self._SHAPE_POLYGON_NAME,
button, self.set_tool)
add_menu('tool-shape-heart', _('Heart'), self._SHAPE_HEART_NAME,
button, self.set_tool)
add_menu('tool-shape-parallelogram', _('Parallelogram'),
self._SHAPE_PARALLELOGRAM_NAME, button, self.set_tool)
add_menu('tool-shape-arrow', _('Arrow'), self._SHAPE_ARROW_NAME,
button, self.set_tool)
add_menu('tool-shape-star', _('Star'), self._SHAPE_STAR_NAME, button,
self.set_tool)
add_menu('tool-shape-trapezoid', _('Trapezoid'),
self._SHAPE_TRAPEZOID_NAME, button, self.set_tool)
add_menu('tool-shape-triangle', _('Triangle'),
self._SHAPE_TRIANGLE_NAME, button, self.set_tool)
button.connect('clicked', self.button_set_tool)
button.show_all()
def button_set_tool(self, button):
self.set_tool(button, self._tool_name)
def set_tool(self, widget, tool_name):
logging.debug('tool_name %s', tool_name)
if tool_name is None:
return
if widget != self._tool_button:
self._tool_button.set_icon_name(widget.icon_name)
self._tool_name = tool_name
self.properties['name'] = tool_name
self._activity.area.set_tool(self.properties)
self._fill_color_button.set_sensitive(True)
self._activity.area.end_selection()
# Make the Text Toolbar
class TextToolbar(Gtk.Toolbar):
_ACTION_TEXT_NAME = 'text'
def __init__(self, activity):
Gtk.Toolbar.__init__(self)
self._activity = activity
self.properties = self._activity.area.tool
self._text = DrawToolButton('text', activity.tool_group, _('Type'))
self.insert(self._text, -1)
self._text.connect('clicked', self.set_tool, self._ACTION_TEXT_NAME)
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.insert(separator, -1)
self._bold = ToggleToolButton('format-text-bold')
self.insert(self._bold, -1)
self._bold.show()
self._bold.connect('clicked', self.__bold_bt_cb)
self._italic = ToggleToolButton('format-text-italic')
self.insert(self._italic, -1)
self._italic.show()
self._italic.connect('clicked', self.__italic_bt_cb)
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.insert(separator, -1)
self._font_size = FontSize()
self.insert(self._font_size, -1)
self._font_size_changed_id = self._font_size.connect(
'changed', self.__font_size_changed_cb)
self._font_combo = FontComboBox()
self._fonts_changed_id = self._font_combo.connect(
'changed', self.__font_changed_cb)
fd = activity.area.get_font_description()
font_name = fd.get_family()
self._font_combo.set_font_name(font_name)
self._font_size.set_font_size(int(fd.get_size() / Pango.SCALE))
tool_item = ToolComboBox(self._font_combo)
self.insert(tool_item, -1)
self.show_all()
def __bold_bt_cb(self, button):
fd = self._activity.area.get_font_description()
if button.get_active():
fd.set_weight(Pango.Weight.BOLD)
else:
fd.set_weight(Pango.Weight.NORMAL)
self._activity.area.set_font_description(fd)
def __italic_bt_cb(self, button):
fd = self._activity.area.get_font_description()
if button.get_active():
fd.set_style(Pango.Style.ITALIC)
else:
fd.set_style(Pango.Style.NORMAL)
self._activity.area.set_font_description(fd)
def __font_size_changed_cb(self, widget):
fd = self._activity.area.get_font_description()
value = widget.get_font_size()
fd.set_size(int(value) * Pango.SCALE)
self._activity.area.set_font_description(fd)
def __font_changed_cb(self, combo):
fd = self._activity.area.get_font_description()
font_name = combo.get_font_name()
fd.set_family(font_name)
self._activity.area.set_font_description(fd)
def get_active_text(self, combobox):
model = combobox.get_model()
active = combobox.get_active()
if active < 0:
return None
return model[active][0]
def set_tool(self, widget, tool_name):
self.properties['name'] = tool_name
self._activity.area.set_tool(self.properties)
# Make the Images Toolbar
class ImageToolbar(Gtk.Toolbar):
_EFFECT_RAINBOW_NAME = 'rainbow'
_EFFECT_KALIDOSCOPE_NAME = 'kalidoscope'
def __init__(self, activity):
Gtk.Toolbar.__init__(self)
self._activity = activity
self.properties = self._activity.area.tool
self._object_insert = ToolButton('insert-picture')
self.insert(self._object_insert, -1)
self._object_insert.set_tooltip(_('Insert Image'))
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.insert(separator, -1)
self.width_percent = 1.
self.height_percent = 1.
# FIXME: Sometimes we get the gnome icons not the sugar ones
self._object_rotate_left = ToolButton('object-rotate-left-sugar')
self.insert(self._object_rotate_left, -1)
self._object_rotate_left.set_tooltip(_('Rotate Left'))
self._object_rotate_right = ToolButton('object-rotate-right-sugar')
self.insert(self._object_rotate_right, -1)
self._object_rotate_right.set_tooltip(_('Rotate Right'))
self._mirror_horizontal = ToolButton('mirror-horizontal')
self.insert(self._mirror_horizontal, -1)
self._mirror_horizontal.show()
self._mirror_horizontal.set_tooltip(_('Horizontal Mirror'))
self._mirror_vertical = ToolButton('mirror-vertical')
self.insert(self._mirror_vertical, -1)
self._mirror_vertical.show()
self._mirror_vertical.set_tooltip(_('Vertical Mirror'))
separator = Gtk.SeparatorToolItem()
separator.set_draw(True)
self.insert(separator, -1)
self._effect_grayscale = ToolButton('effect-grayscale')
self.insert(self._effect_grayscale, -1)
self._effect_grayscale.set_tooltip(_('Grayscale'))
self._effect_rainbow = RadioToolButton('effect-rainbow')
self._effect_rainbow.props.group = activity.tool_group
self.insert(self._effect_rainbow, -1)
self._effect_rainbow.set_tooltip(_('Rainbow'))
self._effect_kalidoscope = RadioToolButton('effect-kalidoscope')
self._effect_kalidoscope.props.group = activity.tool_group
self.insert(self._effect_kalidoscope, -1)
self._effect_kalidoscope.set_tooltip(_('Kaleidoscope'))
self._invert_colors = ToolButton('invert-colors')
self.insert(self._invert_colors, -1)
self._invert_colors.set_tooltip(_('Invert Colors'))
self._object_insert.connect('clicked', self.insertImage, activity)
self._object_rotate_left.connect('clicked', self.rotate_left,
activity)
self._object_rotate_right.connect('clicked', self.rotate_right,
activity)
self._mirror_vertical.connect('clicked', self.mirror_vertical)
self._mirror_horizontal.connect('clicked', self.mirror_horizontal)
self._effect_grayscale.connect('clicked', self.grayscale)
self._effect_rainbow.connect('clicked', self.rainbow)
self._effect_kalidoscope.connect('clicked', self.kalidoscope)
self._invert_colors.connect('clicked', self.invert_colors)
self.show_all()
def rotate_left(self, widget, activity):
activity.area.rotate_left(activity.area)
def rotate_right(self, widget, activity):
activity.area.rotate_right(activity.area)
def mirror_horizontal(self, widget):
self._activity.area.mirror(widget)
def mirror_vertical(self, widget):
self._activity.area.mirror(widget, horizontal=False)
def insertImage(self, widget, activity):
try:
chooser = ObjectChooser(self._activity, what_filter='Image',
filter_type=FILTER_TYPE_GENERIC_MIME,
show_preview=True)
        except TypeError:
# for compatibility with older versions
chooser = ObjectChooser(self._activity, what_filter='Image')
try:
result = chooser.run()
if result == Gtk.ResponseType.ACCEPT:
logging.debug('ObjectChooser: %r',
chooser.get_selected_object())
jobject = chooser.get_selected_object()
if jobject and jobject.file_path:
self._activity.area.load_image(jobject.file_path)
finally:
chooser.destroy()
del chooser
    # Convert the image colors to grayscale
def grayscale(self, widget):
self._activity.area.grayscale(widget)
    # Like the brush, but changes its color while painting
def rainbow(self, widget):
self.properties['name'] = self._EFFECT_RAINBOW_NAME
self._activity.area.set_tool(self.properties)
def kalidoscope(self, widget):
self.properties['name'] = self._EFFECT_KALIDOSCOPE_NAME
self._activity.area.set_tool(self.properties)
def invert_colors(self, widget):
self._activity.area.invert_colors()
|
godiard/paint-activity
|
toolbox.py
|
Python
|
gpl-2.0
| 33,175
|
[
"VisIt"
] |
21c18f93dae611dfefff09d6424a522802ea6bbbce87bef479bd191b1ea7e95f
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.api_tvmaze import APITVMaze, TVMazeLookup, TVMazeSeries
from tests import FlexGetBase, use_vcr
lookup_series = APITVMaze.series_lookup
class TestTVMazeShowLookup(FlexGetBase):
__yaml__ = """
templates:
global:
tvmaze_lookup: yes
set:
afield: "{{tvdb_id}}{{tvmaze_episode_name}}{{tvmaze_series_name}}"
tasks:
test:
mock:
- {title: 'House.S01E02.HDTV.XViD-FlexGet'}
- {title: 'Doctor.Who.2005.S02E03.PDTV.XViD-FlexGet'}
series:
- House
- Doctor Who 2005
test_unknown_series:
mock:
- {title: 'Aoeu.Htns.S01E01.htvd'}
series:
- Aoeu Htns
test_date:
mock:
- title: the daily show 2012-6-6
series:
- the daily show (with jon stewart)
test_search_result:
mock:
- {title: 'Shameless.2011.S01E02.HDTV.XViD-FlexGet'}
- {title: 'Shameless.2011.S03E02.HDTV.XViD-FlexGet'}
series:
- Shameless (2011)
test_title_with_year:
mock:
- {title: 'The.Flash.2014.S02E06.HDTV.x264-LOL'}
series:
- The Flash (2014)
test_from_filesystem:
filesystem:
path: tvmaze_test_dir/
recursive: yes
series:
- The Walking Dead
- The Big Bang Theory
- Marvels Jessica Jones
- The Flash (2014)
test_series_expiration:
mock:
- {title: 'Shameless.2011.S03E02.HDTV.XViD-FlexGet'}
series:
- Shameless (2011)
test_show_is_number:
mock:
- {title: '1992.S01E02.720p.HDTV.XViD-FlexGet'}
- {title: '24 S09E12 HDTV x264-LOL'}
series:
- 1992
- 24
test_show_contain_number:
mock:
- {title: 'Tosh.0 S07E30 HDTV x264-MiNDTHEGAP'}
- {title: 'Unwrapped 2.0 S02E06 HDTV x264-MiNDTHEGAP'}
- {title: 'Detroit 1-8-7 S01E16 HDTV x264-MiNDTHEGAP'}
- {title: 'Jake 2.0 S01E10 HDTV x264-MiNDTHEGAP'}
series:
- Detroit 1-8-7
- Jake 2.0
- Unwrapped 2.0
- Tosh.0
test_episode_without_air_date:
mock:
- {title: 'Firefly S01E13 HDTV x264-LOL'}
series:
- Firefly
set:
bfield: "{{tvmaze_episode_airdate}}{{tvmaze_episode_airstamp}}"
test_episode_summary:
mock:
- {title: 'The.Flash.2014.S02E02.HDTV.x264-LOL'}
series:
- The Flash
test_show_with_non_ascii_chars:
mock:
- {title: 'Unite 9 S01E16 VFQ HDTV XviD-bLinKkY'}
series:
- Unite 9
test_show_cast:
mock:
- {title: 'The.Flash.2014.S02E02.HDTV.x264-LOL'}
series:
- The Flash
test_multiple_characters_per_actor:
mock:
- {title: 'Californication.S01E01.HDTV.x264-LOL'}
- {title: 'The.X-Files.S01E01.HDTV.x264-LOL'}
- {title: 'Aquarius.US.S01E1.HDTV.x264-LOL'}
series:
- Californication
- The X-Files
- Aquarius
test_episode_air_date:
mock:
- {title: 'The.Flash.2014.S02E02.HDTV.x264-LOL'}
series:
- The Flash
test_queries_via_ids:
mock:
- {title: 'The.Flash.2014.S02E02.HDTV.x264-LOL', tvmaze_id: '13'}
- {title: 'The.Flash.2014.S02E03.HDTV.x264-LOL', tvdb_id: '279121'}
- {title: 'The.Flash.2014.S02E04.HDTV.x264-LOL', imdb_id: 'tt3107288'}
series:
- The Flash
"""
@use_vcr
def test_lookup_name(self):
self.execute_task('test')
entry = self.task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet')
        assert entry['tvmaze_series_id'] == 118, \
            'tvmaze_series_id should be 118, is %s for %s' % (entry['tvmaze_series_id'], entry['series_name'])
        assert entry['tvmaze_series_status'] == 'Ended', 'series status should be "Ended", returned %s' \
                                                         % (entry['tvmaze_series_status'])
@use_vcr
def test_lookup(self):
self.execute_task('test')
entry = self.task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet')
assert entry['tvmaze_episode_name'] == 'Paternity', \
'%s tvmaze_episode_name should be Paternity, is actually %s' % (
entry['title'], entry['tvmaze_episode_name'])
assert entry['tvmaze_series_status'] == 'Ended', \
'status for %s is %s, should be "Ended"' % (entry['title'], entry['tvmaze_series_status'])
assert entry['afield'] == '73255PaternityHouse', \
'afield was not set correctly, expected 73255PaternityHouse, got %s' % entry['afield']
assert self.task.find_entry(tvmaze_episode_name='School Reunion'), \
'Failed tvmaze lookup for Doctor Who 2005 S02E03'
@use_vcr
def test_unknown_series(self):
# Test an unknown series does not cause any exceptions
self.execute_task('test_unknown_series')
# Make sure it didn't make a false match
entry = self.task.find_entry('accepted', title='Aoeu.Htns.S01E01.htvd')
assert entry.get('tvdb_id') is None, 'should not have populated tvdb data'
@use_vcr
def test_search_results(self):
self.execute_task('test_search_result')
entry = self.task.entries[0]
print(entry['tvmaze_series_name'].lower())
assert entry['tvmaze_series_name'].lower() == 'shameless', 'lookup failed'
with Session() as session:
assert self.task.entries[1]['tvmaze_series_name'].lower() == 'shameless', 'second lookup failed'
assert len(session.query(TVMazeLookup).all()) == 1, 'should have added 1 show to search result'
assert len(session.query(TVMazeSeries).all()) == 1, 'should only have added one show to show table'
assert session.query(
TVMazeSeries).first().name == 'Shameless', 'should have added Shameless and not Shameless (2011)'
# change the search query
session.query(TVMazeLookup).update({'search_name': "Shameless.S01E03.HDTV-FlexGet"})
session.commit()
lookupargs = {'title': "Shameless.S01E03.HDTV-FlexGet"}
series = APITVMaze.series_lookup(**lookupargs)
assert series.tvdb_id == entry['tvdb_id'], 'tvdb id should be the same as the first entry'
assert series.tvmaze_id == entry['tvmaze_series_id'], 'tvmaze id should be the same as the first entry'
assert series.name.lower() == entry['tvmaze_series_name'].lower(), 'series name should match first entry'
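# A minimal direct-call sketch (illustrative, mirroring the lookupargs
# pattern used in this test): the module-level alias lookup_series defined
# near the imports hits the same lookup cache as the plugin does:
#   series = lookup_series(title='Shameless (2011)')
#   print(series.tvmaze_id, series.name)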
@use_vcr
def test_date(self):
self.execute_task('test_date')
entry = self.task.find_entry(title='the daily show 2012-6-6')
assert entry.get('tvmaze_series_id') == 249, 'expected tvmaze_series_id 249, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 20471, 'episode id should be 20471, is actually %s' % entry.get(
'tvmaze_episode_id')
@use_vcr
def test_title_with_year(self):
self.execute_task('test_title_with_year')
entry = self.task.find_entry(title='The.Flash.2014.S02E06.HDTV.x264-LOL')
assert entry.get('tvmaze_series_id') == 13, 'expected tvmaze_series_id 13, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_series_year') == 2014, 'expected tvmaze_series_year 2014, got %s' % entry.get(
'tvmaze_series_year')
@use_vcr
def test_from_filesystem(self):
self.execute_task('test_from_filesystem')
entry = self.task.find_entry(title='Marvels.Jessica.Jones.S01E02.PROPER.720p.WEBRiP.x264-QCF')
assert entry.get('tvmaze_series_id') == 1370, 'expected tvmaze_series_id 1370, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 206178, 'episode id should be 206178, is actually %s' % entry.get(
'tvmaze_episode_id')
entry = self.task.find_entry(title='Marvels.Jessica.Jones.S01E03.720p.WEBRiP.x264-QCF')
assert entry.get('tvmaze_series_id') == 1370, 'expected tvmaze_series_id 1370, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 206177, 'episode id should be 206177, is actually %s' % entry.get(
'tvmaze_episode_id')
entry = self.task.find_entry(title='The.Big.Bang.Theory.S09E09.720p.HDTV.X264-DIMENSION')
assert entry.get('tvmaze_series_id') == 66, 'expected tvmaze_series_id 66, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 409180, 'episode id should be 409180, is actually %s' % entry.get(
'tvmaze_episode_id')
entry = self.task.find_entry(title='The.Flash.S02E04.1080p.WEB-DL.DD5.1.H.264-KiNGS')
assert entry.get('tvmaze_series_id') == 13, 'expected tvmaze_series_id 13, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 284974, 'episode id should be 284974, is actually %s' % entry.get(
'tvmaze_episode_id')
entry = self.task.find_entry(title='The.Walking.Dead.S06E08.Start.to.Finish-SiCKBEARD')
assert entry.get('tvmaze_series_id') == 73, 'expected tvmaze_series_id 73, got %s' % entry.get(
'tvmaze_series_id')
assert entry.get('tvmaze_episode_id') == 185073, 'episode id should be 185073, is actually %s' % entry.get(
'tvmaze_episode_id')
@use_vcr
def test_series_expiration(self):
self.execute_task('test_series_expiration')
entry = self.task.entries[0]
assert entry['tvmaze_series_name'].lower() == 'shameless', 'lookup failed'
assert entry['tvmaze_episode_id'] == 11134, 'episode id should be 11134, instead its %s' % entry[
'tvmaze_episode_id']
with Session() as session:
# Manually change a value of the series to differ from actual value
assert session.query(
TVMazeSeries).first().name == 'Shameless', 'should have added Shameless and not Shameless (2011)'
session.query(TVMazeSeries).update({'weight': 99})
session.commit()
# Verify value has changed successfully and series expiration status is still False
assert session.query(TVMazeSeries).first().expired is False, 'expired status should be False'
assert session.query(TVMazeSeries).first().weight == 99, 'should be updated to 99'
# Set series last_update time to 8 days ago, to trigger a show refresh upon request.
last_week = datetime.now() - timedelta(days=8) # Assuming max days between refreshes is 7
session.query(TVMazeSeries).update({'last_update': last_week})
session.commit()
# Verify series expiration flag is now True
assert session.query(TVMazeSeries).first().expired is True, 'expired status should be True'
lookupargs = {'title': "Shameless"}
series = APITVMaze.series_lookup(**lookupargs)
# Verify series data has been refreshed with actual values upon 2nd call, and series expiration flag
# is set to False
assert series.weight == 5, \
'weight should have been updated back to 5 from 99, instead its %s' % series.weight
assert session.query(TVMazeSeries).first().expired is False, 'expired status should be False'
@use_vcr
def test_show_is_number(self):
self.execute_task('test_show_is_number')
entry = self.task.find_entry(series_name='1992')
assert entry['tvmaze_series_name'] == '1992', 'lookup failed'
assert entry['tvmaze_series_id'] == 4879, 'series id should be 4879, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 308487, 'episode id should be 308487, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.find_entry(series_name='24')
assert entry['tvmaze_series_name'] == '24', 'lookup failed'
assert entry['tvmaze_series_id'] == 167, 'series id should be 167, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 12094, 'episode id should be 12094, instead its %s' % entry[
'tvmaze_episode_id']
@use_vcr
def test_show_contain_number(self):
self.execute_task('test_show_contain_number')
entry = self.task.find_entry(series_name='Detroit 1-8-7')
assert entry['tvmaze_series_name'] == 'Detroit 1-8-7', \
'tvmaze_series_name should be Detroit 1-8-7, instead its %s' % entry['tvmaze_series_name']
assert entry['tvmaze_series_id'] == 998, 'series id should be 998, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 98765, 'episode id should be 98765, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.find_entry(series_name='Tosh.0')
assert entry['tvmaze_series_name'] == 'Tosh.0', \
'tvmaze_series_name should be Tosh.0, instead its %s' % entry['tvmaze_series_name']
assert entry['tvmaze_series_id'] == 260, 'series id should be 260, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 457679, 'episode id should be 457679, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.find_entry(series_name='Unwrapped 2.0')
assert entry['tvmaze_series_name'] == 'Unwrapped 2.0', \
'tvmaze_series_name should be Unwrapped 2.0, instead its %s' % entry['tvmaze_series_name']
assert entry['tvmaze_series_id'] == 5736, 'series id should be 5736, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 387214, 'episode id should be 387214, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.find_entry(series_name='Jake 2.0')
assert entry['tvmaze_series_name'] == 'Jake 2.0', \
'tvmaze_series_name should be Jake 2.0, instead its %s' % entry['tvmaze_series_name']
assert entry['tvmaze_series_id'] == 2381, 'series id should be 2381, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 184265, 'episode id should be 184265, instead its %s' % entry[
'tvmaze_episode_id']
@use_vcr
def test_episode_without_air_date_and_air_stamp(self):
self.execute_task('test_episode_without_air_date')
entry = self.task.find_entry(title='Firefly S01E13 HDTV x264-LOL')
assert entry['tvmaze_series_id'] == 180, 'series id should be 180, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 13007, 'episode id should be 13007, instead its %s' % entry[
'tvmaze_episode_id']
assert entry['tvmaze_episode_airdate'] is None, \
'Expected airdate to be None, got %s' % entry['tvmaze_episode_airdate']
assert entry['tvmaze_episode_airstamp'] is None, \
'Expected airstamp to be None, got %s' % entry['tvmaze_episode_airstamp']
@use_vcr
def test_episode_summary(self):
expected_summary = u"The team's visitors, Jay Garrick, explains that he comes from a parallel world" \
u" and was a speedster there, but lost his powers transitioning over. Now he insists" \
u" that Barry needs his help fighting a new metahuman, Sand Demon, who came from" \
u" Jay's world. Meanwhile, Officer Patty Spivot tries to join Joe's Metahuman Taskforce."
self.execute_task('test_episode_summary')
entry = self.task.entries[0]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 211206, 'episode id should be 211206, instead its %s' % entry[
'tvmaze_episode_id']
assert entry['tvmaze_episode_summary'] == expected_summary, 'Expected summary is different %s' % entry[
'tvmaze_episode_summary']
@use_vcr
def test_show_with_non_ascii_chars(self):
self.execute_task('test_show_with_non_ascii_chars')
entry = self.task.entries[0]
assert entry['tvmaze_series_name'] == u'Unit\xe9 9', u'series name should be Unit\xe9 9, instead its %s' % entry[
'tvmaze_series_name']
assert entry['tvmaze_series_id'] == 8652, 'series id should be 8652, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 476294, 'episode id should be 476294, instead its %s' % entry[
'tvmaze_episode_id']
@use_vcr
def test_show_cast(self):
self.execute_task('test_show_cast')
entry = self.task.entries[0]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 211206, 'episode id should be 211206, instead its %s' % entry[
'tvmaze_episode_id']
assert len(entry['tvmaze_series_actors']) == 9, \
'expected actors list for series to contain 9 members,' \
' instead it contains %s' % len(entry['tvmaze_series_actors'])
@use_vcr
def test_episode_air_date(self):
self.execute_task('test_episode_air_date')
entry = self.task.entries[0]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 211206, 'episode id should be 211206, instead its %s' % entry[
'tvmaze_episode_id']
assert isinstance(entry['tvmaze_episode_airdate'], datetime), 'expected tvmaze_episode_airdate to be a datetime'
airdate = datetime.strftime(entry['tvmaze_episode_airdate'], '%Y-%m-%d')
assert airdate == '2015-10-13', 'episode airdate should be 2015-10-13, instead its %s' % airdate
@use_vcr
def test_queries_via_ids(self):
self.execute_task('test_queries_via_ids')
entry = self.task.entries[0]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 211206, 'episode id should be 211206, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.entries[1]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 187808, 'episode id should be 187808, instead its %s' % entry[
'tvmaze_episode_id']
entry = self.task.entries[2]
assert entry['tvmaze_series_id'] == 13, 'series id should be 13, instead its %s' % entry[
'tvmaze_series_id']
assert entry['tvmaze_episode_id'] == 284974, 'episode id should be 284974, instead its %s' % entry[
'tvmaze_episode_id']
|
cvium/Flexget
|
tests/test_tvmaze.py
|
Python
|
mit
| 19,929
|
[
"Firefly"
] |
b260f62ca3b388a6282cfb2b58aa396cf5f50b6562a0e04a35cff1b1f8f4530c
|
#!/usr/bin/python
"""
Copyright 2010 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
#
def findName(nameString):
conn = dbShared.ghConn()
cursor = conn.cursor()
cursor.execute("SELECT userID FROM tUsers WHERE userID='" + nameString + "'")
row = cursor.fetchone()
if row is None:
userid = ""
else:
userid = row[0]
cursor.close()
conn.close()
return userid
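# Usage sketch (illustrative, not part of the original script): findName
# returns the stored userID when the name is taken and "" when it is free,
# so availability is simply:
#   taken = (findName("somename") != "")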
# Main program
form = cgi.FieldStorage()
uname = form.getfirst("uname", "")
uname = dbShared.dbInsertSafe(uname)
result = ""
tmpID = findName(uname)
if (tmpID == ""):
result = ""
else:
result = "That user name is not available."
print 'Content-type: text/html\n'
print result
|
clreinki/GalaxyHarvester
|
nameAvailable.py
|
Python
|
agpl-3.0
| 1,490
|
[
"Galaxy"
] |
a0cf3c51cfd436f232e97c0801127ab171419feb3dc20ba4dfecbb1b675360f5
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Jai Ram Rideout", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import join, abspath
from qiime.util import (get_options_lookup, load_qiime_config, make_option,
parse_command_line_parameters)
from qiime.parallel.assign_taxonomy import ParallelBlastTaxonomyAssigner
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
default_reference_seqs_fp = qiime_config['assign_taxonomy_reference_seqs_fp']
default_id_to_taxonomy_fp = qiime_config['assign_taxonomy_id_to_taxonomy_fp']
script_info = {}
script_info[
'brief_description'] = """Parallel taxonomy assignment using BLAST"""
script_info[
'script_description'] = """This script performs like the assign_taxonomy.py script, but is intended to make use of multicore/multiprocessor environments to perform analyses in parallel."""
script_info['script_usage'] = []
script_info['script_usage'].append(
("""Example""",
"""Assign taxonomy to all sequences in the input file (-i) using BLAST with the id to taxonomy mapping file (-t) and reference sequences file (-r), and write the results (-o) to $PWD/blast_assigned_taxonomy/. ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""",
"""%prog -i $PWD/inseqs.fasta -t $PWD/id_to_tax.txt -r $PWD/refseqs.fasta -o $PWD/blast_assigned_taxonomy/"""))
script_info[
'output_description'] = """Mapping of sequence identifiers to taxonomy and quality scores."""
script_info['required_options'] = [
make_option('-i', '--input_fasta_fp',
type='existing_filepath', help='full path to ' +
'input_fasta_fp [REQUIRED]'),
make_option('-o', '--output_dir', action='store',
type='new_dirpath', help='full path to store output files ' +
'[REQUIRED]')
]
script_info['optional_options'] = [
make_option('-r', '--reference_seqs_fp', type='existing_filepath',
help='Ref seqs to blast against. Must provide either --blast_db or '
'--reference_seqs_fp for assignment with blast [default: %s]'
% default_reference_seqs_fp,
default=default_reference_seqs_fp),
make_option('-b', '--blast_db', type='blast_db',
help='Database to blast against. Must provide either --blast_db or '
'--reference_seqs_fp for assignment with blast [default: %default]'),
make_option('-e', '--e_value', type='float',
help='Maximum e-value to record an assignment, only used for blast '
'method [default: %default]', default=0.001),
make_option('-B', '--blastmat_dir', action='store',
type='string', help='full path to directory containing ' +
'blastmat file [default: %default]',
default=qiime_config['blastmat_dir']),
options_lookup['jobs_to_start'],
options_lookup['retain_temp_files'],
options_lookup['suppress_submit_jobs'],
options_lookup['poll_directly'],
options_lookup['cluster_jobs_fp'],
options_lookup['suppress_polling'],
options_lookup['job_prefix'],
options_lookup['seconds_to_sleep']
]
if default_id_to_taxonomy_fp:
script_info['optional_options'].append(
make_option('-t', '--id_to_taxonomy_fp', action='store',
type='existing_filepath', help='full path to ' +
'id_to_taxonomy mapping file [default: %s]' % default_id_to_taxonomy_fp,
default=default_id_to_taxonomy_fp))
else:
script_info['required_options'].append(
make_option('-t', '--id_to_taxonomy_fp', action='store',
type='existing_filepath', help='full path to ' +
'id_to_taxonomy mapping file [REQUIRED]'))
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if not (opts.reference_seqs_fp or opts.blast_db):
option_parser.error('Either a blast db (via -b) or a collection of '
'reference sequences (via -r) must be passed to '
'assign taxonomy using blast.')
# create dict of command-line options
params = eval(str(opts))
parallel_runner = ParallelBlastTaxonomyAssigner(
cluster_jobs_fp=opts.cluster_jobs_fp,
jobs_to_start=opts.jobs_to_start,
retain_temp_files=opts.retain_temp_files,
suppress_polling=opts.suppress_polling,
seconds_to_sleep=opts.seconds_to_sleep)
parallel_runner(opts.input_fasta_fp,
abspath(opts.output_dir),
params,
job_prefix=opts.job_prefix,
poll_directly=opts.poll_directly,
suppress_submit_jobs=opts.suppress_submit_jobs)
if __name__ == "__main__":
main()
|
wasade/qiime
|
scripts/parallel_assign_taxonomy_blast.py
|
Python
|
gpl-2.0
| 5,210
|
[
"BLAST"
] |
ee5dec72b6ad84a39056ff5fb58fa62dec7abf36ae2cc9d32d519b534969cb1c
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from robot.model import TotalStatisticsBuilder
from robot import model, utils
from messagefilter import MessageFilter
from keywordremover import KeywordRemover
from testcase import TestCase
from keyword import Keyword
class TestSuite(model.TestSuite):
__slots__ = ['message', 'starttime', 'endtime']
test_class = TestCase
keyword_class = Keyword
def __init__(self, source='', name='', doc='', metadata=None,
message='', starttime=None, endtime=None):
"""Results of a single test suite.
:ivar parent: Parent :class:`TestSuite` or `None`.
:ivar source: Path to the source file.
:ivar name: Test suite name.
:ivar doc: Test suite documentation.
:ivar metadata: Test suite metadata as a dictionary.
:ivar suites: Child suite results.
:ivar tests: Test case results, a list of :class:`~.testcase.TestCase`
instances.
:ivar keywords: A list containing setup and teardown results.
:ivar message: Possible failure message.
:ivar starttime: Test suite execution start time as a timestamp.
:ivar endtime: Test suite execution end time as a timestamp.
"""
model.TestSuite.__init__(self, source, name, doc, metadata)
self.message = message
self.starttime = starttime
self.endtime = endtime
@property
def status(self):
return 'FAIL' if self.statistics.critical.failed else 'PASS'
@property
def statistics(self):
return TotalStatisticsBuilder(self).stats
@property
def full_message(self):
if not self.message:
return self.statistics.message
return '%s\n\n%s' % (self.message, self.statistics.message)
@property
def elapsedtime(self):
if self.starttime and self.endtime:
return utils.get_elapsed_time(self.starttime, self.endtime)
return sum(child.elapsedtime for child in
chain(self.suites, self.tests, self.keywords))
def remove_keywords(self, how):
self.visit(KeywordRemover(how))
def filter_messages(self, log_level='TRACE'):
self.visit(MessageFilter(log_level))
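# A minimal usage sketch (hedged: the timestamp format below is an
# assumption based on Robot Framework's usual 'YYYYMMDD HH:MM:SS.mmm'
# timestamps; elapsedtime is reported in milliseconds):
#   suite = TestSuite(name='Demo',
#                     starttime='20120101 12:00:00.000',
#                     endtime='20120101 12:00:01.500')
#   suite.elapsedtime  # -> 1500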
|
Senseg/robotframework
|
src/robot/result/testsuite.py
|
Python
|
apache-2.0
| 2,826
|
[
"VisIt"
] |
dfb10dd49b5f04a014fa558a2b9e58e147901d6e0e22b9bd5af84dfed013d55f
|
"""
Test the Infinite GMM.
Author : Bertrand Thirion, 2010
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from ..imm import IMM, MixedIMM, co_labelling
from nose.tools import assert_true
from numpy.testing import assert_array_equal
def test_colabel():
# test the co_labelling functionality
z = np.array([0,1,1,0,2])
c = co_labelling(z).todense()
tc = np.array([[ 1., 0., 0., 1., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 1., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
assert_array_equal(c, tc)
def test_imm_loglike_1D():
# Check that the log-likelihood of the data under the infinite gaussian
# mixture model is close to the theoretical data likelihood
n = 100
dim = 1
alpha = .5
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100)
# sampling
like = igmm.sample(x, niter=300)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim)
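# Derivation of the theoretical value above (added note, not in the original
# file): for x ~ N(0, I_d) the expected log-density is
#   E[log p(x)] = -d/2 * log(2*pi) - E[||x||^2] / 2 = -d/2 * (log(2*pi) + 1)
# since E[||x||^2] = d; the empirical mean log-likelihood should approach it.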
def test_imm_loglike_known_groups():
# Check that the log-likelihood of the data under IGMM close to theory
n = 50
dim = 1
alpha = .5
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
kfold = np.floor(np.random.rand(n)*5).astype(int)
# warming
igmm.sample(x, niter=100)
# sampling
like = igmm.sample(x, niter=300, kfold=kfold)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim)
def test_imm_loglike_1D_k10():
# Check with k-fold cross validation (k=5)
n = 50
dim = 1
alpha = .5
k = 5
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100, kfold=k)
# sampling
like = igmm.sample(x, niter=300, kfold=k)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim)
def test_imm_loglike_2D_fast():
# Faster version for log-likelihood imm
n = 100
dim = 2
alpha = .5
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100, init=True)
# sampling
like = igmm.sample(x, niter=300)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim)
def test_imm_loglike_2D():
# Slower cross-validated logL check
n = 50
dim = 2
alpha = .5
k = 5
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100, init=True, kfold=k)
# sampling
like = igmm.sample(x, niter=300, kfold=k)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim)
def test_imm_loglike_2D_a0_1():
# Check with alpha=.1
n = 100
dim = 2
alpha = .1
x = np.random.randn(n, dim)
igmm = IMM(alpha, dim)
igmm.set_priors(x)
# warming
igmm.sample(x, niter=100, init=True)
# sampling
like = igmm.sample(x, niter=300)
theoretical_ll = -dim*.5*(1+np.log(2*np.pi))
empirical_ll = np.log(like).mean()
print(theoretical_ll, empirical_ll)
assert_true(np.absolute(theoretical_ll-empirical_ll)<0.2*dim)
def test_imm_wnc():
# Test the basic imm_wnc
n = 50
dim = 1
alpha = .5
g0 = 1.
x = np.random.rand(n, dim)
x[:int(.3 * n)] *= .2
x[:int(.1 * n)] *= .3
# instantiate
migmm = MixedIMM(alpha, dim)
migmm.set_priors(x)
migmm.set_constant_densities(null_dens=g0)
ncp = 0.5*np.ones(n)
# warming
migmm.sample(x, null_class_proba=ncp, niter=100, init=True)
g = np.reshape(np.linspace(0, 1, 101), (101, dim))
# sampling
like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300,
sampling_points=g)
# the density should sum to 1
ds = 0.01*like.sum()
assert_true(ds<1)
assert_true(ds>.8)
assert_true(np.sum(pproba>.5)>1)
assert_true(np.sum(pproba<.5)>1)
def test_imm_wnc1():
# Test the basic imm_wnc, where the probability under the null is random
n = 50
dim = 1
alpha = .5
g0 = 1.
x = np.random.rand(n, dim)
x[:int(.3 * n)] *= .2
x[:int(.1 * n)] *= .3
# instantiate
migmm = MixedIMM(alpha, dim)
migmm.set_priors(x)
migmm.set_constant_densities(null_dens=g0)
ncp = np.random.rand(n)
# warming
migmm.sample(x, null_class_proba=ncp, niter=100, init=True)
g = np.reshape(np.linspace(0, 1, 101), (101, dim))
#sampling
like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300,
sampling_points=g)
# the density should sum to 1
ds = 0.01*like.sum()
assert_true(ds<1)
assert_true(ds>.8)
assert_true(np.sum(pproba>.5)>1)
assert_true(np.sum(pproba<.5)>1)
def test_imm_wnc2():
# Test the basic imm_wnc when null class is shrunk to 0
n = 50
dim = 1
alpha = .5
g0 = 1.
x = np.random.rand(n, dim)
x[:int(.3 * n)] *= .2
x[:int(.1 * n)] *= .3
# instantiate
migmm = MixedIMM(alpha, dim)
migmm.set_priors(x)
migmm.set_constant_densities(null_dens=g0)
ncp = np.zeros(n)
# warming
migmm.sample(x, null_class_proba=ncp, niter=100, init=True)
# sampling
like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300)
assert_true(like.min()>.1)
assert_true(like.max()<5.)
assert_array_equal(pproba, ncp)
def test_imm_wnc3():
# Test the basic imm_wnc when null class is of prob 1 (nothing is estimated)
n = 50
dim = 1
alpha = .5
g0 = 1.
x = np.random.rand(n, dim)
x[:int(.3 * n)] *= .2
x[:int(.1 * n)] *= .3
# instantiate
migmm = MixedIMM(alpha, dim)
migmm.set_priors(x)
migmm.set_constant_densities(null_dens=g0)
ncp = np.ones(n)
# warming
migmm.sample(x, null_class_proba=ncp, niter=100, init=True)
# sampling
like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300)
assert_array_equal(pproba, ncp)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
alexis-roche/nipy
|
nipy/algorithms/clustering/tests/test_imm.py
|
Python
|
bsd-3-clause
| 6,527
|
[
"Gaussian"
] |
ac02c4e9c1b2019d2c2abafcac2d19100ca677395c9839786e995a74f2b695ef
|
#==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import re
# The following line defines an ascii string used for dynamically refreshing
# the import and progress callbacks on the same terminal line.
# See http://www.termsys.demon.co.uk/vtansi.htm
# \033 is the C-style octal code for an escape character
# [2000D moves the cursor back 2000 columns, this is a brute force way of
# getting to the start of the line.
# [K erases the end of the line
clrLine = "\033[2000D\033[K"
def auto_not_in_place(v=True):
"""Force it to not run in place
"""
import itkConfig
itkConfig.NotInPlace = v
def auto_progress(progress_type=1):
"""Set up auto progress report
progress_type:
1 or True -> auto progress be used in a terminal
2 -> simple auto progress (without special characters)
0 or False -> disable auto progress
"""
import itkConfig
if progress_type is True or progress_type == 1:
itkConfig.ImportCallback = terminal_import_callback
itkConfig.ProgressCallback = terminal_progress_callback
elif progress_type == 2:
itkConfig.ImportCallback = simple_import_callback
itkConfig.ProgressCallback = simple_progress_callback
elif progress_type is False or progress_type == 0:
itkConfig.ImportCallback = None
itkConfig.ProgressCallback = None
else:
raise ValueError("Invalid auto progress type: " + repr(progress_type))
def terminal_progress_callback(name, p):
"""Display the progress of an object and clean the display once complete
This function can be used with itkConfig.ProgressCallback
"""
import sys
print(clrLine + "%s: %f" % (name, p), file=sys.stderr, end="")
if p == 1:
print(clrLine, file=sys.stderr, end="")
def terminal_import_callback(name, p):
"""Display the loading of a module and clean the display once complete
This function can be used with itkConfig.ImportCallback
"""
import sys
print(clrLine + "Loading %s... " % name, file=sys.stderr, end="")
if p == 1:
print(clrLine, file=sys.stderr, end="")
def simple_import_callback(name, p):
"""Print a message when a module is loading
This function can be used with itkConfig.ImportCallback
"""
import sys
if p == 0:
print("Loading %s... " % name, file=sys.stderr, end="")
elif p == 1:
print("done", file=sys.stderr)
def simple_progress_callback(name, p):
"""Print a message when an object is running
This function can be used with itkConfig.ProgressCallback
"""
import sys
if p == 0:
print("Running %s... " % name, file=sys.stderr, end="")
elif p == 1:
print("done", file=sys.stderr)
def force_load():
"""force itk to load all the submodules"""
import itk
for k in dir(itk):
getattr(itk, k)
import sys
def echo(object, f=sys.stderr):
"""Print an object to f (stderr by default)
"""
print(object, file=f)
del sys
def size(image_or_filter):
"""Return the size of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion().GetSize()
def physical_size(image_or_filter):
"""Return the physical size of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# required because range is overloaded in this module
import sys
from builtins import range
spacing_ = spacing(image_or_filter)
size_ = size(image_or_filter)
result = []
for i in range(0, spacing_.Size()):
result.append(spacing_.GetElement(i) * size_.GetElement(i))
return result
def spacing(image_or_filter):
"""Return the spacing of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetSpacing()
def origin(image_or_filter):
"""Return the origin of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetOrigin()
def index(image_or_filter):
"""Return the index of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion().GetIndex()
def region(image_or_filter):
"""Return the region of an image, or of the output image of a filter
This method takes care of updating the needed information
"""
# we don't need the entire output, only its size
image_or_filter.UpdateOutputInformation()
img = output(image_or_filter)
return img.GetLargestPossibleRegion()
HAVE_NUMPY = True
try:
import numpy
except ImportError:
HAVE_NUMPY = False
def _get_itk_pixelid(numpy_array_type):
"""Returns a ITK PixelID given a numpy array."""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
# This is a Mapping from numpy array types to itk pixel types.
_np_itk = {numpy.uint8:itk.UC,
numpy.uint16:itk.US,
numpy.uint32:itk.UI,
numpy.uint64:itk.UL,
numpy.int8:itk.SC,
numpy.int16:itk.SS,
numpy.int32:itk.SI,
numpy.int64:itk.SL,
numpy.float32:itk.F,
numpy.float64:itk.D,
numpy.complex64:itk.complex[itk.F],
numpy.complex128:itk.complex[itk.D]
}
try:
return _np_itk[numpy_array_type.dtype.type]
except KeyError as e:
for key in _np_itk:
if numpy.issubdtype(numpy_array_type.dtype.type, key):
return _np_itk[key]
raise e
def _GetArrayFromImage(image_or_filter, function, keep_axes, update):
"""Get an Array with the content of the image buffer
"""
# Check for numpy
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# Finds the image type
import itk
keys = [k for k in itk.PyBuffer.keys() if k[0] == output(image_or_filter).__class__]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
ImageType = keys[0]
# Create a numpy array of the type of the input image
templatedFunction = getattr(itk.PyBuffer[keys[0]], function)
return templatedFunction(output(image_or_filter), keep_axes, update)
def GetArrayFromImage(image_or_filter, keep_axes=False, update=True):
"""Get an array with the content of the image buffer
"""
return _GetArrayFromImage(image_or_filter, "GetArrayFromImage", keep_axes, update)
array_from_image = GetArrayFromImage
def GetArrayViewFromImage(image_or_filter, keep_axes=False, update=True):
"""Get an array view with the content of the image buffer
"""
return _GetArrayFromImage(image_or_filter, "GetArrayViewFromImage", keep_axes, update)
array_view_from_image = GetArrayViewFromImage
def _GetImageFromArray(arr, function, is_vector):
"""Get an ITK image from a Python array.
"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
PixelType = _get_itk_pixelid(arr)
Dimension = arr.ndim
ImageType = itk.Image[PixelType, Dimension]
if is_vector:
Dimension = arr.ndim - 1
if arr.flags['C_CONTIGUOUS']:
VectorDimension = arr.shape[-1]
else:
VectorDimension = arr.shape[0]
if PixelType == itk.UC:
if VectorDimension == 3:
ImageType = itk.Image[ itk.RGBPixel[itk.UC], Dimension ]
elif VectorDimension == 4:
ImageType = itk.Image[ itk.RGBAPixel[itk.UC], Dimension ]
else:
ImageType = itk.Image[ itk.Vector[PixelType, VectorDimension] , Dimension]
templatedFunction = getattr(itk.PyBuffer[ImageType], function)
return templatedFunction(arr, is_vector)
def GetImageFromArray(arr, is_vector=False):
"""Get an ITK image from a Python array.
"""
return _GetImageFromArray(arr, "GetImageFromArray", is_vector)
image_from_array = GetImageFromArray
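# A minimal usage sketch (hedged: shape/dtype are illustrative and an ITK
# build with the NumPy bridge wrapped is assumed):
#   import numpy, itk
#   arr = numpy.zeros((16, 16), dtype=numpy.uint8)
#   img = itk.image_from_array(arr)        # copies; yields itk.Image[itk.UC, 2]
#   view = itk.image_view_from_array(arr)  # shares the buffer instead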
def GetImageViewFromArray(arr, is_vector=False):
"""Get an ITK image view from a Python array.
"""
return _GetImageFromArray(arr, "GetImageViewFromArray", is_vector)
image_view_from_array = GetImageViewFromArray
def _GetArrayFromVnlObject(vnl_object, function):
"""Get an array with the content of vnl_object
"""
# Check for numpy
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
# Finds the vnl object type
import itk
PixelType = itk.template(vnl_object)[1][0]
keys = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
if len(keys) == 0:
raise RuntimeError("No suitable template parameter can be found.")
# Create a numpy array of the type of the vnl object
templatedFunction = getattr(itk.PyVnl[keys[0]], function)
return templatedFunction(vnl_object)
def GetArrayFromVnlVector(vnl_vector):
"""Get an array with the content of vnl_vector
"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayFromVnlVector")
array_from_vnl_vector = GetArrayFromVnlVector
def GetArrayViewFromVnlVector(vnl_vector):
"""Get an array view of vnl_vector
"""
return _GetArrayFromVnlObject(vnl_vector, "GetArrayViewFromVnlVector")
array_view_from_vnl_vector = GetArrayViewFromVnlVector
def GetArrayFromVnlMatrix(vnl_matrix):
"""Get an array with the content of vnl_matrix
"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayFromVnlMatrix")
def GetArrayViewFromVnlMatrix(vnl_matrix):
"""Get an array view of vnl_matrix
"""
return _GetArrayFromVnlObject(vnl_matrix, "GetArrayViewFromVnlMatrix")
array_from_vnl_matrix = GetArrayFromVnlMatrix
def _GetVnlObjectFromArray(arr, function):
"""Get a vnl object from a Python array.
"""
if not HAVE_NUMPY:
raise ImportError('Numpy not available.')
import itk
PixelType = _get_itk_pixelid(arr)
templatedFunction = getattr(itk.PyVnl[PixelType], function)
return templatedFunction(arr)
def GetVnlVectorFromArray(arr):
"""Get a vnl vector from a Python array.
"""
return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray")
vnl_vector_from_array = GetVnlVectorFromArray
def GetVnlMatrixFromArray(arr):
"""Get a vnl matrix from a Python array.
"""
return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray")
vnl_matrix_from_array = GetVnlMatrixFromArray
def GetArrayFromMatrix(itk_matrix):
return GetArrayFromVnlMatrix(itk_matrix.GetVnlMatrix().as_matrix())
array_from_matrix = GetArrayFromMatrix
def GetMatrixFromArray(arr):
import itk
vnl_matrix = GetVnlMatrixFromArray(arr)
dims = arr.shape
PixelType = _get_itk_pixelid(arr)
m = itk.Matrix[PixelType, dims[0], dims[1]](vnl_matrix)
return m
matrix_from_array = GetMatrixFromArray
def xarray_from_image(image):
"""Convert an itk.Image to an xarray.DataArray.
Origin and spacing metadata is preserved in the xarray's coords. The
Direction is set in the `direction` attribute.
Dims are labeled as `x`, `y`, `z`, and `c`.
This interface and behavior are experimental and subject to possible
future changes."""
import xarray as xr
import itk
import numpy as np
array_view = itk.array_view_from_image(image)
spacing = itk.spacing(image)
origin = itk.origin(image)
size = itk.size(image)
direction = np.flip(itk.array_from_matrix(image.GetDirection()))
spatial_dimension = image.GetImageDimension()
spatial_dims = ('x', 'y', 'z')
coords = {}
for index, dim in enumerate(spatial_dims[:spatial_dimension]):
coords[dim] = np.linspace(origin[index],
origin[index] + (size[index]-1)*spacing[index],
size[index],
dtype=np.float64)
dims = list(reversed(spatial_dims[:spatial_dimension]))
components = image.GetNumberOfComponentsPerPixel()
if components > 1:
dims.append('c')
coords['c'] = np.arange(components, dtype=np.uint64)
data_array = xr.DataArray(array_view,
dims=dims,
coords=coords,
attrs={'direction': direction})
return data_array
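# Round-trip sketch (illustrative; assumes xarray is installed and `image`
# is a wrapped itk.Image as in the function above):
#   da = xarray_from_image(image)    # coords built from origin/spacing/size
#   image2 = image_from_xarray(da)   # origin/spacing/direction restored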
def image_from_xarray(data_array):
"""Convert an xarray.DataArray to an itk.Image.
Metadata encoded with xarray_from_image is applied to the itk.Image.
This interface and behavior are experimental and subject to possible
future changes."""
import numpy as np
import itk
spatial_dims = list({'z', 'y', 'x'}.intersection(set(data_array.dims)))
spatial_dims.sort(reverse=True)
spatial_dimension = len(spatial_dims)
ordered_dims = ('z', 'y', 'x')[-spatial_dimension:]
if ordered_dims != tuple(spatial_dims):
raise ValueError('Spatial dimensions do not have the required order: ' + str(ordered_dims))
is_vector = 'c' in data_array.dims
itk_image = itk.image_view_from_array(data_array.values, is_vector=is_vector)
origin = [0.0]*spatial_dimension
spacing = [1.0]*spatial_dimension
for index, dim in enumerate(spatial_dims):
origin[index] = float(data_array.coords[dim][0])
spacing[index] = float(data_array.coords[dim][1]) - float(data_array.coords[dim][0])
spacing.reverse()
itk_image.SetSpacing(spacing)
origin.reverse()
itk_image.SetOrigin(origin)
if 'direction' in data_array.attrs:
direction = data_array.attrs['direction']
itk_image.SetDirection(np.flip(direction))
return itk_image
def vtk_image_from_image(image):
"""Convert an itk.Image to a vtk.vtkImageData."""
import itk
import vtk
from vtk.util.numpy_support import numpy_to_vtk
array = itk.array_view_from_image(image)
vtk_image = vtk.vtkImageData()
data_array = numpy_to_vtk(array.reshape(-1))
data_array.SetNumberOfComponents(image.GetNumberOfComponentsPerPixel())
data_array.SetName('Scalars')
# Always set Scalars for (future?) multi-component volume rendering
vtk_image.GetPointData().SetScalars(data_array)
dim = image.GetImageDimension()
spacing = [1.0,] * 3
spacing[:dim] = image.GetSpacing()
vtk_image.SetSpacing(spacing)
origin = [0.0,] * 3
origin[:dim] = image.GetOrigin()
vtk_image.SetOrigin(origin)
dims = [1,] * 3
dims[:dim] = itk.size(image)
vtk_image.SetDimensions(dims)
# Todo: Add Direction with VTK 9
if image.GetImageDimension() == 3:
PixelType = itk.template(image)[1][0]
if PixelType == itk.Vector:
vtk_image.GetPointData().SetVectors(data_array)
elif PixelType == itk.CovariantVector:
vtk_image.GetPointData().SetVectors(data_array)
elif PixelType == itk.SymmetricSecondRankTensor:
vtk_image.GetPointData().SetTensors(data_array)
elif PixelType == itk.DiffusionTensor3D:
vtk_image.GetPointData().SetTensors(data_array)
return vtk_image
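# Sketch of the VTK round trip (illustrative; requires a VTK build with
# numpy_support and a wrapped itk.Image `image`):
#   vtk_img = vtk_image_from_image(image)
#   image2 = image_from_vtk_image(vtk_img)  # Direction not carried (pre-VTK 9)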
def image_from_vtk_image(vtk_image):
"""Convert a vtk.vtkImageData to an itk.Image."""
import itk
from vtk.util.numpy_support import vtk_to_numpy
point_data = vtk_image.GetPointData()
array = vtk_to_numpy(point_data.GetScalars())
array = array.reshape(-1)
is_vector = point_data.GetScalars().GetNumberOfComponents() != 1
dims = list(vtk_image.GetDimensions())
if is_vector and dims[-1] == 1:
# 2D
dims = dims[:2]
dims.reverse()
dims.append(point_data.GetScalars().GetNumberOfComponents())
else:
dims.reverse()
array.shape = tuple(dims)
image = itk.image_view_from_array(array, is_vector)
dim = image.GetImageDimension()
spacing = [1.0] * dim
spacing[:dim] = vtk_image.GetSpacing()[:dim]
image.SetSpacing(spacing)
origin = [0.0] * dim
origin[:dim] = vtk_image.GetOrigin()[:dim]
image.SetOrigin(origin)
# Todo: Add Direction with VTK 9
return image
# return an image
from itkTemplate import image, output
def template(cl):
"""Return the template of a class (or of the class of an object) and
its parameters
template() returns a tuple with 2 elements:
- the first one is the itkTemplate object
- the second is a tuple containing the template parameters
"""
from itkTemplate import itkTemplate
return itkTemplate.__class_to_template__[class_(cl)]
def ctype(s):
"""Return the c type corresponding to the string passed in parameter
The string can contain some extra spaces.
see also itkCType
"""
from itkTypes import itkCType
ret = itkCType.GetCType(" ".join(s.split()))
if ret is None:
raise KeyError("Unrecognized C type '%s'" % s)
return ret
def class_(obj):
"""Return a class from an object
Often in itk, the __class__ is not what the user is expecting.
class_() should do a better job
"""
import inspect
if inspect.isclass(obj):
# obj is already a class !
return obj
else:
return obj.__class__
def python_type(obj):
"""Returns the Python type name of an object
The Python name corresponding to the given instantiated object is printed.
This includes both the Python name and the parameters of the object. A user
can copy and paste the printed value to instantiate a new object of the
same type."""
import itkTemplate
from itkTypes import itkCType
def in_itk(name):
import itk
# Remove "itk::" and "std::" from template name.
# Only happens for ITK objects.
shortname = name.split('::')[-1]
shortname = shortname.split('itk')[-1]
namespace = itk
# A type cannot be part of ITK if its name was not modified above. This
# check avoids having an input of type `list` and return `itk.list` that
# also exists.
likely_itk = (shortname != name or name[:3] == 'vnl')
if likely_itk and hasattr(namespace, shortname):
return namespace.__name__ + '.' + shortname # Prepend name with 'itk.'
else:
return name
def recursive(obj, level):
try:
T, P = template(obj)
name = in_itk(T.__name__)
parameters = []
for t in P:
parameters.append(recursive(t, level+1))
return name + "[" + ",".join(parameters) + "]"
except KeyError:
if isinstance(obj, itkCType): # Handles CTypes differently
return 'itk.' + obj.short_name
elif hasattr(obj, "__name__"):
# This should be where most ITK types end up.
return in_itk(obj.__name__)
elif (not isinstance(obj, type)
and type(obj) != itkTemplate.itkTemplate and level != 0):
# obj should actually be considered a value, not a type,
# or it is already an itkTemplate type.
# A value can be an integer that is a template parameter.
# This does not happen at the first level of the recursion
# as it is not possible that this object would be a template
# parameter. Checking the level `0` allows e.g. to find the
# type of an object that is a `list` or an `int`.
return str(obj)
else:
return in_itk(type(obj).__name__)
return recursive(obj, 0)
def range(image_or_filter):
"""Return the range of values in a image of in the output image of a filter
The minimum and maximum values are returned in a tuple: (min, max)
range() take care of updating the pipeline
"""
import itk
img = output(image_or_filter)
img.UpdateOutputInformation()
img.Update()
# don't put that calculator in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
auto_pipeline.current = tmp_auto_pipeline
comp.Compute()
return (comp.GetMinimum(), comp.GetMaximum())
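# Usage sketch (illustrative; `image` must have MinimumMaximumImageCalculator
# wrapped for its type; note this range() shadows the builtin in this module):
#   lo, hi = range(image)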
def imwrite(image_or_filter, filename, compression=False):
"""Write a image or the output image of a filter to a file.
The writer is instantiated with the image type of the image in
parameter (or, again, with the output image of the filter in parameter).
"""
import itk
img = output(image_or_filter)
img.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.ImageFileWriter[type(img)].New(
Input=img,
FileName=filename,
UseCompression=compression)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def imread(filename, pixel_type=None, fallback_only=False):
"""Read an image from a file or series of files and return an itk.Image.
The reader is instantiated with the image type of the image file if
`pixel_type` is not provided (default). The dimension of the image is
automatically found. If the given filename is a list or a tuple, the
reader will use an itk.ImageSeriesReader object to read the files.
If `fallback_only` is set to `True`, `imread()` will first try to
automatically deduce the image pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically
happen if the pixel type is not supported (e.g. it is not currently
wrapped).
"""
import itk
if fallback_only:
if pixel_type is None:
raise Exception("pixel_type must be set when using the fallback_only option")
try:
return imread(filename)
except KeyError:
pass
if type(filename) in [list, tuple]:
TemplateReaderType=itk.ImageSeriesReader
io_filename=filename[0]
increase_dimension=True
kwargs={'FileNames':filename}
else:
TemplateReaderType=itk.ImageFileReader
io_filename=filename
increase_dimension=False
kwargs={'FileName':filename}
if pixel_type:
imageIO = itk.ImageIOFactory.CreateImageIO(io_filename, itk.CommonEnums.IOFileMode_ReadMode)
if not imageIO:
raise RuntimeError("No ImageIO is registered to handle the given file.")
imageIO.SetFileName(io_filename)
imageIO.ReadImageInformation()
dimension = imageIO.GetNumberOfDimensions()
# Increase dimension if last dimension is not of size one.
if increase_dimension and imageIO.GetDimensions(dimension-1) != 1:
dimension += 1
ImageType = itk.Image[pixel_type, dimension]
reader = TemplateReaderType[ImageType].New(**kwargs)
else:
reader = TemplateReaderType.New(**kwargs)
reader.Update()
return reader.GetOutput()
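# Usage sketch (file name and pixel type are illustrative, not from the
# original file):
#   image = imread('slice.png')                    # pixel type deduced
#   image = imread('slice.png', itk.F)             # force float pixels
#   image = imread('slice.png', itk.F, fallback_only=True)  # deduce, else itk.F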
def meshwrite(mesh, filename, compression=False):
"""Write a mesh to a file.
The writer is instantiated according to the type of the input mesh.
"""
import itk
mesh.UpdateOutputInformation()
# don't put that writer in the automatic pipeline
tmp_auto_pipeline = auto_pipeline.current
auto_pipeline.current = None
writer = itk.MeshFileWriter[type(mesh)].New(
Input=mesh,
FileName=filename,
UseCompression=compression)
auto_pipeline.current = tmp_auto_pipeline
writer.Update()
def meshread(filename, pixel_type=None, fallback_only=False):
"""Read a mesh from a file and return an itk.Mesh.
The reader is instantiated with the mesh type of the mesh file if
`pixel_type` is not provided (default). The dimension of the mesh is
automatically found.
If `fallback_only` is set to `True`, `meshread()` will first try to
automatically deduce the image pixel_type, and only use the given
`pixel_type` if automatic deduction fails. Failures typically
happen if the pixel type is not supported (e.g. it is not currently
wrapped).
"""
import itk
if fallback_only:
if pixel_type is None:
raise Exception("pixel_type must be set when using the fallback_only option")
try:
return meshread(filename)
except KeyError:
pass
TemplateReaderType=itk.MeshFileReader
io_filename=filename
increase_dimension=False
kwargs={'FileName':filename}
if pixel_type:
meshIO = itk.MeshIOFactory.CreateMeshIO(io_filename, itk.CommonEnums.IOFileMode_ReadMode)
if not meshIO:
raise RuntimeError("No MeshIO is registered to handle the given file.")
meshIO.SetFileName(io_filename)
meshIO.ReadMeshInformation()
dimension = meshIO.GetPointDimension()
# Increase dimension if last dimension is not of size one.
if increase_dimension and meshIO.GetDimensions(dimension-1) != 1:
dimension += 1
MeshType = itk.Mesh[pixel_type, dimension]
reader = TemplateReaderType[MeshType].New(**kwargs)
else:
reader = TemplateReaderType.New(**kwargs)
reader.Update()
return reader.GetOutput()
def search(s, case_sensitive=False): # , fuzzy=True):
"""Search for a class name in the itk module.
"""
s = s.replace(" ", "")
if not case_sensitive:
s = s.lower()
import itk
names = sorted(dir(itk))
# exact match first
if case_sensitive:
res = [n for n in names if s == n]
else:
res = [n for n in names if s == n.lower()]
# then exact match inside the name
if case_sensitive:
res += [n for n in names if s in n and s != n]
else:
res += [n for n in names if s in n.lower() and s != n.lower()]
# if fuzzy:
# try:
# everything now requires editdist
# import editdist
# if case_sensitive:
# res.sort(key=lambda x: editdist.distance(x, s))
# else:
# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
# except:
# pass
return res
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile('(_)([a-z0-9A-Z])')
def _underscore_upper(matchobj):
return matchobj.group(2).upper()
def _snake_to_camel(keyword):
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
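# Expected behavior of the conversion above (values follow from the regex):
#   _snake_to_camel('file_name')            # -> 'FileName'
#   _snake_to_camel('number_of_iterations') # -> 'NumberOfIterations'
#   _snake_to_camel('sigma')                # -> 'Sigma'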
def set_inputs(new_itk_object, args=[], kargs={}):
"""Set the inputs of the given objects, according to the non named or the
named parameters in args and kargs
This function tries to assign all the non named parameters in the input of
the new_itk_object
- the first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name
prefixed by 'Set'.
set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
This is the function use in the enhanced New() method to manage the inputs.
It can be used to produce a similar behavior:
def SetInputs(self, *args, **kargs):
import itk
itk.set_inputs(self, *args, **kargs)
"""
# try to get the images from the filters in args
args = [output(arg) for arg in args]
# args without name are filter used to set input image
#
# count SetInput calls to call SetInput, SetInput2, SetInput3, ...
# useful with filters which take 2 inputs (or more) like SubtractImageFilter
# Ex: subtract image2.png to image1.png and save the result in result.png
# r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
# r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
# s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
# itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
try:
for setInputNb, arg in enumerate(args):
methodName = 'SetInput%i' % (setInputNb + 1)
if methodName in dir(new_itk_object):
# first try to use methods called SetInput1, SetInput2, ...
# those method should have more chances to work in case of
# multiple input types
getattr(new_itk_object, methodName)(arg)
else:
# no method called SetInput?
# try with the standard SetInput(nb, input)
new_itk_object.SetInput(setInputNb, arg)
except TypeError as e:
# the exception has (at least) two possible reasons:
# + the filter doesn't take the input number as first argument
# + arg is an object of the wrong type
#
# if it's not the first input, re-raise the exception
if setInputNb != 0:
raise e
# it's the first input, try to use the SetInput() method without input
# number
new_itk_object.SetInput(args[0])
# but raise an exception if there is more than 1 argument
if len(args) > 1:
raise TypeError('Object accepts only 1 input.')
except AttributeError:
# There is no SetInput() method, try SetImage
# but before, check the number of inputs
if len(args) > 1:
raise TypeError('Object accepts only 1 input.')
methodList = ['SetImage', 'SetInputImage']
methodName = None
for m in methodList:
if m in dir(new_itk_object):
methodName = m
if methodName:
getattr(new_itk_object, methodName)(args[0])
else:
raise AttributeError('No method found to set the input.')
# named args : name is the function name, value is argument(s)
for attribName, value in kargs.items():
# use Set as prefix. It allows a shorter and more intuitive
# call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
# with the full name
# (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
if attribName not in ["auto_progress", "template_parameters"]:
if attribName.islower():
attribName = _snake_to_camel(attribName)
attrib = getattr(new_itk_object, 'Set' + attribName)
# Do not use try-except mechanism as this leads to
# segfaults. Instead limit the number of types that are
# tested. The list of tested types could maybe be replaced by
# a test that would check for iterables.
if type(value) in [list, tuple]:
try:
output_value = [output(x) for x in value]
attrib(*output_value)
except:
attrib(output(value))
else:
attrib(output(value))
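# Usage sketch, reusing the reader example from the comments above (file
# names are illustrative):
#   r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
#   r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
#   s = itk.SubtractImageFilter.US2US2US2.New()
#   set_inputs(s, [r1, r2])  # wires SetInput1 / SetInput2 as described above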
class templated_class:
"""This class is used to mimic the behavior of the templated C++ classes.
It is used this way:
class CustomClass:
# class definition here
CustomClass = templated_class(CustomClass)
customObject = CustomClass[template, parameters].New()
The template parameters are passed to the custom class constructor as a
named parameter 'template_parameters' in a tuple.
The custom class may implement a static method
check_template_parameters(parameters) which should raise an exception if
the template parameters provided are not suitable to instantiate the custom
class.
"""
def __init__(self, cls):
"""cls is the custom class
"""
self.__cls__ = cls
self.__templates__ = {}
def New(self, *args, **kargs):
"""Use the parameters to infer the types of the template parameters.
"""
# extract the types from the arguments to instantiate the class
import itk
types = tuple(itk.class_(o) for o in args)
return self[types].New(*args, **kargs)
def __getitem__(self, template_parameters):
"""Return a pair class-template parameters ready to be instantiated.
The template parameters may be validated if the custom class provide
the static method check_template_parameters(parameters).
"""
if not isinstance(template_parameters, tuple):
template_parameters = (template_parameters,)
return (
templated_class.__templated_class_and_parameters__(
self,
template_parameters)
)
def check_template_parameters(self, template_parameters):
"""Check the template parameters passed in parameter.
"""
# this method is there mainly to make possible to reuse it in the
# custom class constructor after having used templated_class().
# Without that, the following example doesn't work:
#
# class CustomClass:
# def __init__(self, *args, **kargs):
# template_parameters = kargs["template_parameters"]
# CustomClass.check_template_parameters(template_parameters)
# other init stuff
# def check_template_parameters(template_parameters):
# check, really
# pass
# CustomClass = templated_class(CustomClass)
#
self.__cls__.check_template_parameters(template_parameters)
def add_template(self, name, params):
if not isinstance(params, list) and not isinstance(params, tuple):
params = (params,)
params = tuple(params)
val = self[params]
self.__templates__[params] = val
setattr(self, name, val)
def add_image_templates(self, *args):
import itk
        # args is a tuple, so comparing it to [] was always False
        if not args:
return
combinations = [[t] for t in args[0]]
for types in args[1:]:
temp = []
for t in types:
for c in combinations:
temp.append(c + [t])
combinations = temp
for d in itk.DIMS:
for c in combinations:
parameters = []
name = ""
for t in c:
parameters.append(itk.Image[t, d])
name += "I" + t.short_name + str(d)
self.add_template(name, tuple(parameters))
class __templated_class_and_parameters__:
"""Inner class used to store the pair class-template parameters ready
to instantiate.
"""
def __init__(self, templated_class, template_parameters):
self.__templated_class__ = templated_class
self.__template_parameters__ = template_parameters
if "check_template_parameters" in dir(templated_class.__cls__):
templated_class.__cls__.check_template_parameters(
template_parameters)
def New(self, *args, **kargs):
"""A New() method to mimic the ITK default behavior, even if the
class doesn't provide any New() method.
"""
kargs["template_parameters"] = self.__template_parameters__
if "New" in dir(self.__templated_class__.__cls__):
obj = self.__templated_class__.__cls__.New(*args, **kargs)
else:
obj = self.__templated_class__.__cls__(*args, **kargs)
setattr(
obj,
"__template_parameters__",
self.__template_parameters__)
setattr(obj, "__templated_class__", self.__templated_class__)
return obj
def __call__(self, *args, **kargs):
return self.New(*args, **kargs)
def keys(self):
return self.__templates__.keys()
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
value = self[key]
except KeyError:
return False
return True
    def __contains__(self, key):
        # delegate to has_key(); 'key in self' here would recurse infinitely
        return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __len__(self):
return len(self.keys())
class pipeline:
"""A convenient class to store the reference to the filters of a pipeline
With this class, a method can create a pipeline of several filters and
return it without losing the references to the filters in this pipeline.
    The pipeline object acts almost like a filter (it has a GetOutput() method)
    and thus can simply be integrated into another pipeline.
"""
def __init__(self, *args, **kargs):
self.clear()
self.input = None
set_inputs(self, args, kargs)
def connect(self, filter):
"""Connect a new filter to the pipeline
The output of the first filter will be used as the input of this
one and the filter passed as parameter will be added to the list
"""
if self.GetOutput() is not None:
set_inputs(filter, [self.GetOutput()])
self.append(filter)
def append(self, filter):
"""Add a new filter to the pipeline
The new filter will not be connected. The user must connect it.
"""
self.filters.append(filter)
def clear(self):
"""Clear the filter list
"""
self.filters = []
def GetOutput(self, index=0):
"""Return the output of the pipeline
If another output is needed, use
pipeline.filters[-1].GetAnotherOutput() instead of this method,
subclass pipeline to implement another GetOutput() method, or use
expose()
"""
if len(self.filters) == 0:
return self.GetInput()
else:
filter = self.filters[-1]
if hasattr(filter, "__getitem__"):
return filter[index]
try:
return filter.GetOutput(index)
except:
if index == 0:
return filter.GetOutput()
else:
raise ValueError("Index can only be 0 on that object")
def GetNumberOfOutputs(self):
"""Return the number of outputs
"""
if len(self.filters) == 0:
return 1
else:
return self.filters[-1].GetNumberOfOutputs()
def SetInput(self, input):
"""Set the input of the pipeline
"""
if len(self.filters) != 0:
set_inputs(self.filters[0], [input])
self.input = input
def GetInput(self):
"""Get the input of the pipeline
"""
return self.input
def Update(self):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].Update()
def UpdateLargestPossibleRegion(self):
"""Update the pipeline
"""
if len(self.filters) > 0:
return self.filters[-1].UpdateLargestPossibleRegion()
def UpdateOutputInformation(self):
if "UpdateOutputInformation" in dir(self.filters[-1]):
self.filters[-1].UpdateOutputInformation()
else:
self.Update()
def __len__(self):
return self.GetNumberOfOutputs()
def __getitem__(self, item):
return self.GetOutput(item)
def __call__(self, *args, **kargs):
set_inputs(self, args, kargs)
self.UpdateLargestPossibleRegion()
return self
def expose(self, name, new_name=None, position=-1):
"""Expose an attribute from a filter of the minipeline.
Once called, the pipeline instance has a new Set/Get set of methods to
access directly the corresponding method of one of the filter of the
pipeline.
Ex: p.expose( "Radius" )
p.SetRadius( 5 )
p.GetRadius( 5 )
By default, the attribute usable on the pipeline instance has the same
name than the one of the filter, but it can be changed by providing a
value to new_name.
The last filter of the pipeline is used by default, but another one may
be used by giving its position.
Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
p.GetSmoothingNeighborhood()
"""
if new_name is None:
new_name = name
src = self.filters[position]
ok = False
set_name = "Set" + name
if set_name in dir(src):
setattr(self, "Set" + new_name, getattr(src, set_name))
ok = True
get_name = "Get" + name
if get_name in dir(src):
setattr(self, "Get" + new_name, getattr(src, get_name))
ok = True
if not ok:
raise RuntimeError(
"No attribute %s at position %s." %
(name, position))
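# A hedged usage sketch (illustration only, not part of the original source):
# building a small pipeline and exposing a filter parameter. The median
# filter instantiation name and 'in.png' are hypothetical placeholders:
#
#   import itk
#   p = pipeline()
#   p.connect(itk.ImageFileReader.UC2.New(FileName='in.png'))
#   p.connect(itk.MedianImageFilter.UC2UC2.New())  # hypothetical name
#   p.expose("Radius")
#   p.SetRadius(2)
#   p.Update()
#   img = p.GetOutput()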
class auto_pipeline(pipeline):
current = None
def __init__(self, *args, **kargs):
pipeline.__init__(self, *args, **kargs)
self.Start()
def Start(self):
auto_pipeline.current = self
def Stop(self):
auto_pipeline.current = None
def down_cast(obj):
"""Down cast an itkLightObject (or a object of a subclass) to its most
specialized type.
"""
import itk
import itkTemplate
className = obj.GetNameOfClass()
t = getattr(itk, className)
if isinstance(t, itkTemplate.itkTemplate):
for c in t.values():
try:
return c.cast(obj)
except:
# fail silently for now
pass
raise RuntimeError(
"Can't downcast to a specialization of %s" %
className)
else:
return t.cast(obj)
def attribute_list(i, name):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
name: the attribute name
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=name,
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in range(1, r.GetNumberOfLabelObjects() + 1):
l.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
return l
def attributes_list(i, names):
"""Returns a list of the specified attributes for the objects in the image.
i: the input LabelImage
    names: the attribute names
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=names[0],
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
l = []
for i in range(1, r.GetNumberOfLabelObjects() + 1):
attrs = []
for name in names:
attrs.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
l.append(tuple(attrs))
return l
def attribute_dict(i, name):
"""Returns a dict with the attribute values in keys and a list of the
corresponding objects in value
i: the input LabelImage
name: the name of the attribute
"""
import itk
i = itk.output(i)
relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
i,
Attribute=name,
ReverseOrdering=True,
InPlace=False)
relabel.UpdateLargestPossibleRegion()
r = relabel.GetOutput()
d = {}
for i in range(1, r.GetNumberOfLabelObjects() + 1):
lo = r.GetLabelObject(i)
v = lo.__getattribute__("Get" + name)()
l = d.get(v, [])
l.append(lo)
d[v] = l
return d
def number_of_objects(i):
"""Returns the number of objets in the image.
i: the input LabelImage
"""
import itk
i.UpdateLargestPossibleRegion()
i = itk.output(i)
return i.GetNumberOfLabelObjects()
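# A hedged usage sketch (illustration only): `lm` stands for a hypothetical
# itk.LabelMap produced elsewhere (e.g. by a label map pipeline), and
# "PhysicalSize"/"Elongation" for label object attributes:
#
#   sizes = attribute_list(lm, "PhysicalSize")
#   pairs = attributes_list(lm, ["PhysicalSize", "Elongation"])
#   by_size = attribute_dict(lm, "PhysicalSize")
#   n = number_of_objects(lm)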
def ipython_kw_matches(text):
"""Match named ITK object's named parameters"""
import IPython
import itk
import re
import inspect
import itkTemplate
regexp = re.compile(r'''
'.*?' | # single quoted strings or
".*?" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
ip = IPython.get_ipython()
if "." in text: # a parameter cannot be dotted
return []
# 1. Find the nearest identifier that comes before an unclosed
# parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
if ip.Completer.readline:
textUntilCursor = ip.Completer.readline.get_line_buffer()[:ip.Completer.readline.get_endidx()]
else:
# IPython >= 5.0.0, which is based on the Python Prompt Toolkit
textUntilCursor = ip.Completer.text_until_cursor
tokens = regexp.findall(textUntilCursor)
tokens.reverse()
iterTokens = iter(tokens)
openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
            ids.append(next(iterTokens))
if not isId(ids[-1]):
ids.pop()
break
            if next(iterTokens) != '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = ip.Completer.global_matches(ids[0])
else:
callableMatches = ip.Completer.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
# drop the .New at this end, so we can search in the class members
if callableMatch.endswith(".New"):
callableMatch = callableMatch[:-4]
elif not re.findall('([A-Z])', callableMatch): # True if snake case
            # Split at the last '.' occurrence
splitted = callableMatch.split('.')
namespace = splitted[:-1]
function_name = splitted[-1]
# Find corresponding object name
object_name = _snake_to_camel(function_name)
# Check that this object actually exists
try:
objectCallableMatch = ".".join(namespace + [object_name])
eval(objectCallableMatch, ip.Completer.namespace)
# Reconstruct full object name
callableMatch = objectCallableMatch
except AttributeError:
# callableMatch is not a snake case function with a
# corresponding object.
pass
try:
object = eval(callableMatch, ip.Completer.namespace)
if isinstance(object, itkTemplate.itkTemplate):
                # this is a template - let's grab the first entry to search
                # for the methods
                object = list(object.values())[0]
namedArgs = []
isin = isinstance(object, itk.LightObject)
if inspect.isclass(object):
issub = issubclass(object, itk.LightObject)
if isin or (inspect.isclass(object) and issub):
namedArgs = [n[3:] for n in dir(object) if n.startswith("Set")]
except Exception as e:
print(e)
continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append(u"%s=" % namedArg)
return argMatches
# install progress callback and custom completer if we are in ipython
# interpreter
try:
import itkConfig
import IPython
if IPython.get_ipython():
IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)
# some cleanup
del itkConfig, IPython
except (ImportError, AttributeError):
# fail silently
pass
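# A hedged illustration (not part of the original source) of what the
# completer registered above enables in an IPython session; the reader type
# is the one used as an example earlier in this file:
#
#   In [1]: import itk
#   In [2]: reader = itk.ImageFileReader.UC2.New(File<TAB>
#   # ... the completer proposes: FileName=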
|
richardbeare/ITK
|
Wrapping/Generators/Python/itkExtras.py
|
Python
|
apache-2.0
| 50,065
|
[
"VTK"
] |
6a844b00e00319156986da368cbaaa7dde7fc38924e83eedda96e0113d09e661
|
#!/usr/bin/env python
import os
import opendrift
if not os.path.exists('openoil.nc'):
raise ValueError('Please run example.py first to generate a '
'netCDF file to be imported.')
o = opendrift.open('openoil.nc')
print(o)
o.plot()
o.plot_property('mass_oil')
|
knutfrode/opendrift
|
examples/example_import.py
|
Python
|
gpl-2.0
| 289
|
[
"NetCDF"
] |
bafc2d93668ca316c0446b429028dc7c72334c4353629980ca612f449adb6960
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroML.org/
"""
import numpy as np
from .bayesian_blocks import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width']
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
    a string specifying how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array_like, optional
Not Implemented
other keyword arguments are described in numpy.histogram().
Returns
-------
hist : array
The values of the histogram. See ``normed`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError("weights are not yet supported "
"for the enhanced histogram")
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scott':
da, bins = scott_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
else:
raise ValueError("unrecognized bin code: '{}'".format(bins))
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
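# A hedged usage sketch (illustration only), assuming this module is
# importable through astropy.stats in the usual way:
#
#   import numpy as np
#   from astropy.stats import histogram
#   x = np.random.RandomState(0).normal(size=1000)
#   hist, edges = histogram(x, bins='freedman')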
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
       Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
    where :math:`q_{N}` is the :math:`N` percent quantile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
bins = dmin + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
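# A hedged worked example (illustration only): for n = 1000 roughly standard
# normal samples the interquartile range is about q75 - q25 = 1.35, so the
# rule gives dx = 2 * 1.35 / 1000 ** (1 / 3) = 2 * 1.35 / 10 = 0.27.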
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
quiet : bool (optional)
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
       arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
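# A hedged usage sketch (illustration only); scipy must be installed since
# knuth_bin_width optimizes the Knuth likelihood with scipy.optimize.fmin:
#
#   import numpy as np
#   x = np.random.RandomState(0).normal(size=500)
#   dx, edges = knuth_bin_width(x, return_bins=True)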
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
        M : int
            Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
|
funbaker/astropy
|
astropy/stats/histogram.py
|
Python
|
bsd-3-clause
| 10,043
|
[
"Gaussian"
] |
7e5b44e45cab1846754f344d45f9a03e2a140c511445b28b6571316019f948b6
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unsupervised Kernel Regression (UKR) for Python.
Implemented as a scikit-learn module.
Author: Christoph Hermes
Created on January 16, 2015 18:48:22
The MIT License (MIT)
Copyright (c) 2015 Christoph Hermes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.optimize import minimize
import sklearn
from sklearn import decomposition, manifold
from scipy.linalg import sqrtm
# own modules
from ukr_core import (ukr_bp, ukr_dY, ukr_E, ukr_project,
ukr_backproject_particles)
import rprop
# possible UKR kernels: tuple(kernel, kernel derivative)
try: # try using numexpr
import numexpr as ne
gaussian = (lambda x: ne.evaluate('exp(-.5 * x)'), lambda x: ne.evaluate('-.5 * exp(-.5 * x)'))
quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
student_n = (lambda x, n: ne.evaluate('(1. + x/n)**(-(n+1.)/2.)'), lambda x, n: ne.evaluate('-(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.)') )
except ImportError:
gaussian = (lambda x: np.exp(-.5 * x), lambda x: -.5 * np.exp(-.5 * x))
quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
student_n = (lambda x, n: (1. + x/n)**(-(n+1.)/2.), lambda x, n: -(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.) )
student_1 = (lambda x: student_n[0](x, 1), lambda x: student_n[1](x, 1))
student_2 = (lambda x: student_n[0](x, 2), lambda x: student_n[1](x, 2))
student_3 = (lambda x: student_n[0](x, 3), lambda x: student_n[1](x, 3))
student_9 = (lambda x: student_n[0](x, 9), lambda x: student_n[1](x, 9))
student_k = lambda k: (lambda x: student_n[0](x, k), lambda x: student_n[1](x, k))
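# A hedged illustration (not part of the original source) of the kernel
# convention above: each kernel is a (k, k_der) pair of element-wise
# functions that ukr_bp apparently evaluates on a matrix of pairwise
# distances. For example:
#
#   import numpy as np
#   k, k_der = gaussian
#   d = np.array([[0.0, 1.0], [1.0, 0.0]])
#   K = k(d)        # kernel values
#   dK = k_der(d)   # derivative of the kernel w.r.t. its argument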
class UKR(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
"""Unsupervised Kernel Regression (UKR)
Parameters
----------
n_components : int
Manifold dimension, usually in {1,2,3}.
kernel : str or tuple(k : func(x), k_der : func(x))
UKR kernel `k` and its derivative `k_der`. A few examples are included
in this module: gaussian, quartic and student_{1,2,3,9}.
metric : {L1, L2} or float
Distance metric.
L1: cityblock/manhattan; L2: euclidean
        float : arbitrary Minkowski
n_iter : int
Maximum number of iterations for training the UKR model.
lko_cv : int
Leave-k-out cross validation for training the UKR model.
embeddings : list of initial manifold generators
        If None, TSNE is used for the initial embedding, with PCA as a
        fallback if TSNE is not available.
Good choices are:
* sklearn.decomposition.PCA(`n_components`)
* sklearn.decomposition.KernelPCA(`n_components`, kernel='rbf')
* sklearn.manifold.locally_linear.LocallyLinearEmbedding(n_neighbors, `n_components`, method='modified')
* sklearn.manifold.MDS(n_components=`n_components`, n_jobs=-1),
* sklearn.manifold.TSNE(n_components=`n_components`),
enforceCycle : bool
        Are the high-dimensional points sampled from cyclic data, e.g. a
        rotating object or a walking person? In this case the UKR tries to
        maintain a close spatial distance between subsequent manifold points.
verbose : bool
Print additional information esp. during the training stage.
Attributes
----------
X : np.ndarray, shape=(N,D)
High-dimensional point list for UKR training.
Y : np.ndarray, shape=(N,n_components)
        Low-dimensional representation of `X`.
"""
def __init__(self, n_components=2, kernel=gaussian, metric='L2', lko_cv=1, n_iter=1000, embeddings=None, enforceCycle=False, verbose=True):
if isinstance(kernel, basestring):
if kernel.lower() == 'gaussian':
self.k, self.k_der = gaussian
elif kernel.lower() == 'quartic':
self.k, self.k_der = quartic
elif kernel.lower() == 'student_1':
self.k, self.k_der = student_1
elif kernel.lower() == 'student_2':
self.k, self.k_der = student_2
elif kernel.lower() == 'student_3':
self.k, self.k_der = student_3
elif kernel.lower() == 'student_9':
self.k, self.k_der = student_9
else:
self.k, self.k_der = kernel
if isinstance(metric, basestring):
assert metric in ['L1', 'L2'], "failed condition: metric in ['L1', 'L2']"
if metric == 'L1': self.metric = 1.
elif metric == 'L2': self.metric = 2.
else:
self.metric = metric
self.n_components = n_components
self.lko_cv = lko_cv
self.n_iter = n_iter
self.enforceCycle = enforceCycle
self.verbose = verbose
if embeddings is None:
try:
self.embeddings = [manifold.TSNE(n_components=self.n_components)]
except AttributeError:
                print 'ukr.py::Warning: TSNE not found in the sklearn package. Trying PCA instead.'
self.embeddings = [decomposition.PCA(n_components=self.n_components)]
else:
self.embeddings = embeddings
self.X = None
self.Y = None
self.B = None
pass
def fit(self, X, y=None):
"""Train the UKR model.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
UKR model object.
"""
X = np.atleast_2d(X)
###########################
# find an initial embedding
Y = None
embed_ = None
error = np.inf
for embeddingI, embedding in enumerate(self.embeddings):
if self.verbose:
print 'Try embedding %2d/%2d: %s' % (embeddingI+1, len(self.embeddings), embedding.__class__.__name__)
try:
Y_init_ = embedding.fit_transform(X)
Y_init_ = Y_init_ - Y_init_.mean(axis=0) # center around zero
except:
continue
# normalize initial hypothesis to Y.T * Y = I
Y_init_ = Y_init_.dot(np.linalg.pinv(sqrtm(Y_init_.T.dot(Y_init_))))
            # optimize the scaling factor by using least squares
def residuals(p, X_, Y_):
B, P = ukr_bp(Y_ * p, self.k, self.k_der, self.lko_cv, metric=self.metric)
return ukr_E(X_, B)
p0 = np.ones((1,self.n_components))
sol = minimize(residuals, p0, method='Nelder-Mead', args=(X, Y_init_))
if sol['x'].max() < 1000:
Y_init_ = Y_init_ * sol['x']
else:
print 'UKR::warning: scaling initialization failed'
Y_init_ = Y_init_ * 20
# final projection error estimation
B, P = ukr_bp(Y_init_, self.k, self.k_der, self.lko_cv, metric=self.metric)
err_ = ukr_E(X, B)
if self.verbose:
print ' Error: %f' % err_
# store the results if they're an improvement
if err_ < error:
error = err_
Y = Y_init_
embed_ = embedding
# Summary:
if self.verbose:
print '=> using embedding', embed_.__class__.__name__
######################
# Refine the UKR model
iRpropPlus = rprop.iRpropPlus()
for iter in xrange(self.n_iter):
if self.verbose and iter % 10 == 0:
print 'UKR iter %5d, Err=%9.6f' % (iter, iRpropPlus.E_prev)
            # derivative of X_model w.r.t. the error gradient
B, P = ukr_bp(Y, self.k, self.k_der, self.lko_cv, metric=self.metric)
if self.enforceCycle and iter % 20 < 10 and iter < self.n_iter/2:
                # enforce a close spatial distance between subsequent manifold
                # points every ten iterations during the first half of training
dY = -np.diff(np.vstack([Y, Y[0]]), axis=0)
else:
dY = ukr_dY(Y, X, B, P)
# reconstruction error
E_cur = ukr_E(X, B) / X.shape[1]
Y = iRpropPlus.update(Y, dY, E_cur)
# store training results
self.X = X # original data
self.Y = Y # manifold points
return self
def fit_transform(self, X, y=None):
"""Train the UKR model and return the low-dimensional samples.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
Y : np.ndarray, shape=(N, `n_components`)
Low-dimensional representation of `X`.
"""
X = np.atleast_2d(X)
self.fit(X, y)
return self.Y
def transform(self, X, n_particle_iter=100):
"""Project each sample in `X` to the embedding.
Uses a particle set for the optimization.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
Y : np.ndarray, shape=(N, `n_components`)
Low-dimensional representation of `X`.
"""
X = np.atleast_2d(X)
Y = ukr_backproject_particles(self.Y, self.X, self.k, self.k_der, self.metric, X,
n_particles=self.Y.shape[0], n_iter=n_particle_iter)
return Y
def predict(self, Y):
"""Project a set of manifold points into the orignal space.
Parameters
----------
Y : np.ndarray, shape=(N,`n_components`)
Arbitrary points on the manifold.
Returns
-------
X : np.ndarray, shape=(N,D)
Corresponding samples in the high-dimensional space.
"""
assert self.Y is not None, "untrained UKR model"
Y = np.atleast_2d(Y)
assert Y.shape[1] == self.n_components, \
"failed condition: Y.shape[1] == self.n_components"
B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, metric=self.metric)
return ukr_project(self.X, B)
def predict_proba(self, Y):
"""Kernel density estimate for each sample.
Parameters
----------
Y : np.ndarray, shape=(N,`n_components`)
Arbitrary points on the manifold.
Returns
-------
p : array-like, shape=(N,)
Estimated density value for each sample.
"""
assert self.Y is not None, "untrained UKR model"
Y = np.atleast_2d(Y)
assert Y.shape[1] == self.n_components, \
"failed condition: Y.shape[1] == self.n_components"
B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, bNorm=False, metric=self.metric)
return B.mean(axis=0)
pass
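# A hedged usage sketch (illustration only), assuming the companion modules
# ukr_core and rprop are importable; the noisy circle data is synthetic:
#
#   import numpy as np
#   t = np.linspace(0, 2 * np.pi, 200)
#   X = np.c_[np.cos(t), np.sin(t), 0.1 * np.random.randn(200)]
#   model = UKR(n_components=1, kernel=gaussian, n_iter=200,
#               enforceCycle=True, verbose=False)
#   Y = model.fit_transform(X)
#   X_rec = model.predict(Y)   # back to the high-dimensional space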
|
chermes/python-ukr
|
src_naive/ukr.py
|
Python
|
mit
| 11,957
|
[
"Gaussian"
] |
7a60f86a684e6d7c2c4f1dc31600b4869767fd5463eca03f02e6c954ac7c370c
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
import logging
import threading
import time
import traceback
from apache_beam import error
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PValueCache
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.transforms.display import DisplayData
from apache_beam.typehints import typehints
from apache_beam.options.pipeline_options import StandardOptions
__all__ = ['DataflowRunner']
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method will submit an independent job for
remote execution that consists of the nodes reachable from the passed in
node argument or entire graph if node is None. The run() method returns
after the service created the job and will not wait for the job to finish
if blocking is set to False.
"""
  # Environment version information. It is passed to the service during
  # a job submission and is used by the service to establish what features
# are expected by the workers.
BATCH_ENVIRONMENT_MAJOR_VERSION = '5'
STREAMING_ENVIRONMENT_MAJOR_VERSION = '0'
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
@staticmethod
def poll_for_job_completion(runner, result):
"""Polls for the specified job to finish running (successfully or not)."""
last_message_time = None
last_message_hash = None
last_error_rank = float('-inf')
last_error_msg = None
last_job_state = None
# How long to wait after pipeline failure for the error
# message to show up giving the reason for the failure.
# It typically takes about 30 seconds.
final_countdown_timer_secs = 50.0
sleep_secs = 5.0
# Try to prioritize the user-level traceback, if any.
def rank_error(msg):
if 'work item was attempted' in msg:
return -1
elif 'Traceback' in msg:
return 1
return 0
job_id = result.job_id()
while True:
response = runner.dataflow_client.get_job(job_id)
# If get() is called very soon after Create() the response may not contain
# an initialized 'currentState' field.
if response.currentState is not None:
if response.currentState != last_job_state:
logging.info('Job %s is in state %s', job_id, response.currentState)
last_job_state = response.currentState
if str(response.currentState) != 'JOB_STATE_RUNNING':
# Stop checking for new messages on timeout, explanatory
# message received, success, or a terminal job state caused
# by the user that therefore doesn't require explanation.
if (final_countdown_timer_secs <= 0.0
or last_error_msg is not None
or str(response.currentState) == 'JOB_STATE_DONE'
or str(response.currentState) == 'JOB_STATE_CANCELLED'
or str(response.currentState) == 'JOB_STATE_UPDATED'
or str(response.currentState) == 'JOB_STATE_DRAINED'):
break
# The job has failed; ensure we see any final error messages.
sleep_secs = 1.0 # poll faster during the final countdown
final_countdown_timer_secs -= sleep_secs
time.sleep(sleep_secs)
# Get all messages since beginning of the job run or since last message.
page_token = None
while True:
messages, page_token = runner.dataflow_client.list_messages(
job_id, page_token=page_token, start_time=last_message_time)
for m in messages:
message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
m_hash = hash(message)
if last_message_hash is not None and m_hash == last_message_hash:
# Skip the first message if it is the last message we got in the
# previous round. This can happen because we use the
# last_message_time as a parameter of the query for new messages.
continue
last_message_time = m.time
last_message_hash = m_hash
# Skip empty messages.
if m.messageImportance is None:
continue
logging.info(message)
if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
if rank_error(m.messageText) >= last_error_rank:
last_error_rank = rank_error(m.messageText)
last_error_msg = m.messageText
if not page_token:
break
result._job = response
runner.last_error_msg = last_error_msg
@staticmethod
def group_by_key_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class GroupByKeyInputVisitor(PipelineVisitor):
"""A visitor that replaces `Any` element type for input `PCollection` of
a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.
      TODO(BEAM-115): Once the Python SDK is compatible with the new Runner
      API, we could directly replace the coder instead of mutating the
      element type.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
pcoll = transform_node.inputs[0]
input_type = pcoll.element_type
# If input_type is not specified, then treat it as `Any`.
if not input_type:
input_type = typehints.Any
if not isinstance(input_type, typehints.TupleHint.TupleConstraint):
if isinstance(input_type, typehints.AnyTypeConstraint):
# `Any` type needs to be replaced with a KV[Any, Any] to
# force a KV coder as the main output coder for the pcollection
# preceding a GroupByKey.
pcoll.element_type = typehints.KV[typehints.Any, typehints.Any]
else:
# TODO: Handle other valid types,
# e.g. Union[KV[str, int], KV[str, float]]
raise ValueError(
"Input to GroupByKey must be of Tuple or Any type. "
"Found %s for %s" % (input_type, pcoll))
return GroupByKeyInputVisitor()
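  # A hedged illustration (not part of the original source) of the rewrite
  # performed by the visitor above, with hypothetical pipeline code:
  #
  #   pcoll = p | beam.Create([('k', 1)])   # element_type may default to Any
  #   grouped = pcoll | beam.GroupByKey()
  #   # after the visitor runs, pcoll.element_type is
  #   # typehints.KV[typehints.Any, typehints.Any], forcing a KV coder.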
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
"""A visitor that replaces the element type for input ``PCollections``s of
a ``Flatten`` transform with that of the output ``PCollection``.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = transform_node.outputs[None]
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
  # TODO(mariagh): Make this method take pipeline_options
def run(self, pipeline):
"""Remotely executes entire pipeline or parts reachable from node."""
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
self.job = apiclient.Job(pipeline._options)
# Dataflow runner requires a KV type for GBK inputs, hence we enforce that
# here.
pipeline.visit(self.group_by_key_input_visitor())
# Dataflow runner requires output type of the Flatten to be the same as the
# inputs, hence we enforce that here.
pipeline.visit(self.flatten_input_visitor())
# The superclass's run will trigger a traversal of all reachable nodes.
super(DataflowRunner, self).run(pipeline)
standard_options = pipeline._options.view_as(StandardOptions)
if standard_options.streaming:
job_version = DataflowRunner.STREAMING_ENVIRONMENT_MAJOR_VERSION
else:
job_version = DataflowRunner.BATCH_ENVIRONMENT_MAJOR_VERSION
# Get a Dataflow API client and set its options
self.dataflow_client = apiclient.DataflowApplicationClient(
pipeline._options, job_version)
# Create the job
result = DataflowPipelineResult(
self.dataflow_client.create_job(self.job), self)
self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
result.metric_results = self._metrics
return result
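  # A hedged usage sketch (illustration only); the project and bucket names
  # are hypothetical placeholders:
  #
  #   import apache_beam as beam
  #   from apache_beam.options.pipeline_options import PipelineOptions
  #   options = PipelineOptions([
  #       '--runner=DataflowRunner',
  #       '--project=my-project',
  #       '--temp_location=gs://my-bucket/tmp',
  #   ])
  #   p = beam.Pipeline(options=options)
  #   _ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2)
  #   result = p.run()
  #   result.wait_until_finish()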
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(self._get_coder(typehint,
window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint),
window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder):
"""Returns an encoding based on a coder object."""
if not isinstance(coder, coders.Coder):
raise TypeError('Coder object must inherit from coders.Coder: %s.' %
str(coder))
return coder.as_cloud_object()
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': input_encoding['@type'],
'component_encodings': [input_encoding]
}
def _get_encoded_output_coder(self, transform_node, window_value=True):
"""Returns the cloud encoding of the coder for the output of a transform."""
if (len(transform_node.outputs) == 1
and transform_node.outputs[None].element_type is not None):
# TODO(robertwb): Handle type hints for multi-output transforms.
element_type = transform_node.outputs[None].element_type
else:
# TODO(silviuc): Remove this branch (and assert) when typehints are
# propagated everywhere. Returning an 'Any' as type hint will trigger
# usage of the fallback coder (i.e., cPickler).
element_type = typehints.Any
if window_value:
window_coder = (
transform_node.outputs[None].windowing.windowfn.get_window_coder())
else:
window_coder = None
return self._get_typehint_based_encoding(
element_type, window_coder=window_coder)
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
"""Creates a Step object and adds it to the cache."""
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(step_kind, self._get_unique_step_name())
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, step_label)
# Cache the node/step association for the main output of the transform node.
self._cache.cache_output(transform_node, None, step)
# If side_tags is not () then this is a multi-output transform node and we
# need to cache the (node, tag, step) for each of the tags used to access
# the outputs. This is essential because the keys used to search in the
# cache always contain the tag.
for tag in side_tags:
self._cache.cache_output(transform_node, tag, step)
# Finally, we add the display data items to the pipeline step.
# If the transform contains no display data then an empty list is added.
step.add_property(
PropertyNames.DISPLAY_DATA,
[item.get_dict() for item in
DisplayData.create_from(transform_node.transform).items])
return step
def _add_singleton_step(self, label, full_label, tag, input_step):
"""Creates a CollectionToSingleton step used to handle ParDo side inputs."""
# Import here to avoid adding the dependency for local running scenarios.
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, full_label)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(tag)})
step.encoding = self._get_side_input_encoding(input_step.encoding)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (full_label, PropertyNames.OUTPUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
return step
def run_Flatten(self, transform_node):
step = self._add_step(TransformNames.FLATTEN,
transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append(
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def apply_GroupByKey(self, transform, pcoll):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError(('Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label,
coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
return pvalue.PCollection(pcoll.pipeline)
def run_GroupByKey(self, transform_node):
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.GROUP, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
windowing = transform_node.transform.get_windowing(
transform_node.inputs)
step.add_property(PropertyNames.SERIALIZED_FN, pickler.dumps(windowing))
def run_ParDo(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Attach side inputs.
si_dict = {}
# We must call self._cache.get_pvalue exactly once due to refcounting.
si_labels = {}
lookup_label = lambda side_pval: si_labels[side_pval]
for side_pval in transform_node.side_inputs:
assert isinstance(side_pval, AsSideInput)
si_label = 'SideInput-' + self._get_unique_step_name()
si_full_label = '%s/%s' % (transform_node.full_label, si_label)
self._add_singleton_step(
si_label, si_full_label, side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue))
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: si_label,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
step = self._add_step(
TransformNames.DO,
transform_node.full_label + (
'/Do' if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
fn_data = self._pardo_fn_data(transform_node, lookup_label)
step.add_property(PropertyNames.SERIALIZED_FN, pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'out' for main output and 'out_<tag>' for a tagged output.
# Using 'out' as a tag will not clash with the name for main since it will
# be transformed into 'out_out' internally.
outputs = []
step.encoding = self._get_encoded_output_coder(transform_node)
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
for side_tag in transform.output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: (
'%s_%s' % (PropertyNames.OUT, side_tag))})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (transform.fn, transform.args, transform.kwargs, si_tags_and_types,
transform_node.inputs[0].windowing)
def apply_CombineValues(self, transform, pcoll):
return pvalue.PCollection(pcoll.pipeline)
def run_CombineValues(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.COMBINE, transform_node.full_label, transform_node)
# Combiner functions do not take deferred side-inputs (i.e. PValues) and
# therefore the code to handle extra args/kwargs is simpler than for the
    # DoFns of the ParDo transform. The last, empty argument is where
    # side input information would go.
fn_data = (transform.fn, transform.args, transform.kwargs, ())
step.add_property(PropertyNames.SERIALIZED_FN,
pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Note that the accumulator must not have a WindowedValue encoding, while
# the output of this step does in fact have a WindowedValue encoding.
accumulator_encoding = self._get_encoded_output_coder(transform_node,
window_value=False)
output_encoding = self._get_encoded_output_coder(transform_node)
step.encoding = output_encoding
step.add_property(PropertyNames.ENCODING, accumulator_encoding)
# Generate description for main output 'out.'
outputs = []
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node):
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the source specific properties.
if not hasattr(transform.source, 'format'):
# If a format is not set, we assume the source to be a custom source.
source_dict = {}
source_dict['spec'] = {
'@type': names.SOURCE_TYPE,
names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
}
try:
source_dict['metadata'] = {
'estimated_size_bytes': json_value.get_typed_value_descriptor(
transform.source.estimate_size())
}
except error.RuntimeValueProviderError:
        # Size estimation is best effort, and this error is raised by the
        # value provider.
logging.info(
'Could not estimate size of source %r due to ' + \
'RuntimeValueProviderError', transform.source)
except Exception: # pylint: disable=broad-except
# Size estimation is best effort. So we log the error and continue.
logging.info(
'Could not estimate size of source %r due to an exception: %s',
transform.source, traceback.format_exc())
step.add_property(PropertyNames.SOURCE_STEP_INPUT,
source_dict)
elif transform.source.format == 'text':
step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
elif transform.source.format == 'bigquery':
step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
# TODO(silviuc): Add table validation if transform.source.validate.
if transform.source.table_reference is not None:
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.source.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.source.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.source.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.source.table_reference.projectId)
elif transform.source.query is not None:
step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
step.add_property(PropertyNames.BIGQUERY_USE_LEGACY_SQL,
transform.source.use_legacy_sql)
step.add_property(PropertyNames.BIGQUERY_FLATTEN_RESULTS,
transform.source.flatten_results)
else:
        raise ValueError('BigQuery source %r must specify either a table or'
                         ' a query' % transform.source)
elif transform.source.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubSource is currently available for use only in '
'streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.source.topic)
if transform.source.subscription:
        step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION,
                          transform.source.subscription)
if transform.source.id_label:
step.add_property(PropertyNames.PUBSUB_ID_LABEL,
transform.source.id_label)
else:
raise ValueError(
'Source %r has unexpected format %s.' % (
transform.source, transform.source.format))
if not hasattr(transform.source, 'format'):
step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
else:
step.add_property(PropertyNames.FORMAT, transform.source.format)
# Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
# step should be the type of value outputted by each step. Read steps
# automatically wrap output values in a WindowedValue wrapper, if necessary.
# This is also necessary for proper encoding for size estimation.
coder = coders.WindowedValueCoder(transform._infer_output_coder()) # pylint: disable=protected-access
step.encoding = self._get_cloud_encoding(coder)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run__NativeWrite(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.WRITE, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the sink specific properties.
if transform.sink.format == 'text':
# Note that it is important to use typed properties (@type/value dicts)
# for non-string properties and also for empty strings. For example,
      # in the code below num_shards must be typed, and so must
      # file_name_suffix and shard_name_template (which could be empty
      # strings).
step.add_property(
PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix,
with_type=True)
step.add_property(
PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix,
with_type=True)
step.add_property(
PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template,
with_type=True)
if transform.sink.num_shards > 0:
step.add_property(
PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
# TODO(silviuc): Implement sink validation.
step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
elif transform.sink.format == 'bigquery':
# TODO(silviuc): Add table validation if transform.sink.validate.
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.sink.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.sink.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.sink.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.sink.table_reference.projectId)
step.add_property(PropertyNames.BIGQUERY_CREATE_DISPOSITION,
transform.sink.create_disposition)
step.add_property(PropertyNames.BIGQUERY_WRITE_DISPOSITION,
transform.sink.write_disposition)
if transform.sink.table_schema is not None:
step.add_property(
PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
elif transform.sink.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubSink is currently available for use only in '
'streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.topic)
else:
raise ValueError(
'Sink %r has unexpected format %s.' % (
transform.sink, transform.sink.format))
step.add_property(PropertyNames.FORMAT, transform.sink.format)
# Wrap coder in WindowedValueCoder: this is necessary for proper encoding
# for size estimation.
coder = coders.WindowedValueCoder(transform.sink.coder)
step.encoding = self._get_cloud_encoding(coder)
step.add_property(PropertyNames.ENCODING, step.encoding)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
"""Job is a Job message from the Dataflow API."""
self._job = job
self._runner = runner
self.metric_results = None
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
api_jobstate_map = {
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
}
return (api_jobstate_map[self._job.currentState] if self._job.currentState
else PipelineState.UNKNOWN)
def _is_in_terminal_state(self):
if not self.has_job:
return True
return self.state in [
PipelineState.STOPPED, PipelineState.DONE, PipelineState.FAILED,
PipelineState.CANCELLED, PipelineState.DRAINED]
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
if duration:
raise NotImplementedError(
'DataflowRunner does not support duration argument.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
while thread.is_alive():
time.sleep(5.0)
if self.state != PipelineState.DONE:
# TODO(BEAM-1290): Consider converting this to an error log based on the
# resolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
return self.state
def __str__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
self.job_id(),
self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
"""Indicates an error has occurred in running this pipeline."""
def __init__(self, msg, result):
super(DataflowRuntimeException, self).__init__(msg)
self.result = result
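# --- Illustrative usage sketch (not part of the original module). ---
# How the result type above is typically consumed; `p` is assumed to be an
# apache_beam Pipeline already bound to the Dataflow runner.
#
#     result = p.run()             # a DataflowPipelineResult
#     result.wait_until_finish()   # polls in a daemon thread every 5 seconds
#     if result.state != PipelineState.DONE:
#         raise DataflowRuntimeException('job did not succeed', result)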
|
dhalperi/beam
|
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
|
Python
|
apache-2.0
| 34,929
|
[
"VisIt"
] |
55443a6fc37c5ec03cf8477bd519f65c6023b5a7d970bb301fa04657b4689bb1
|
import numpy as np
import copy
import numpy.testing as npt
from pulse2percept.implants import ArgusI, ArgusII
from pulse2percept.percepts import Percept
from pulse2percept.models import (Thompson2003Spatial, Thompson2003Model)
from pulse2percept.utils import Curcio1990Map, Watson2014DisplaceMap
from pulse2percept.utils.testing import assert_warns_msg
def test_Thompson2003Spatial():
# Thompson2003Spatial automatically sets `radius`:
model = Thompson2003Spatial(engine='serial', xystep=5)
# User can set `radius`:
model.radius = 123
npt.assert_equal(model.radius, 123)
model.build(radius=987)
npt.assert_equal(model.radius, 987)
# Nothing in, None out:
npt.assert_equal(model.predict_percept(ArgusI()), None)
# Converting ret <=> dva
model2 = Thompson2003Spatial(retinotopy=Watson2014DisplaceMap())
npt.assert_equal(isinstance(model2.retinotopy, Watson2014DisplaceMap),
True)
implant = ArgusI(stim=np.zeros(16))
# Zero in = zero out:
percept = model.predict_percept(implant)
npt.assert_equal(isinstance(percept, Percept), True)
npt.assert_equal(percept.shape, list(model.grid.x.shape) + [1])
npt.assert_almost_equal(percept.data, 0)
# Multiple frames are processed independently:
model = Thompson2003Spatial(engine='serial', radius=200, xystep=5,
xrange=(-20, 20), yrange=(-15, 15))
model.build()
percept = model.predict_percept(ArgusI(stim={'A1': [1, 0], 'B3': [0, 2]}))
npt.assert_equal(percept.shape, list(model.grid.x.shape) + [2])
pmax = percept.data.max(axis=(0, 1))
npt.assert_almost_equal(percept.data[2, 3, 0], pmax[0])
npt.assert_almost_equal(percept.data[2, 3, 1], 0)
npt.assert_almost_equal(percept.data[3, 4, 0], 0)
npt.assert_almost_equal(percept.data[3, 4, 1], pmax[1])
npt.assert_almost_equal(percept.time, [0, 1])
def test_deepcopy_Thompson2003Spatial():
original = Thompson2003Spatial()
copied = copy.deepcopy(original)
# Assert they are different objects
npt.assert_equal(id(original) != id(copied), True)
# Assert the objects are equivalent to each other
npt.assert_equal(original == copied, True)
# Assert building one object does not affect the copied
original.build()
npt.assert_equal(copied.is_built, False)
npt.assert_equal(original != copied, True)
# Change the copied attribute by "destroying" the retinotopy attribute
# which should be unique to each SpatialModel object
copied = copy.deepcopy(original)
copied.retinotopy = None
npt.assert_equal(original.retinotopy is not None, True)
npt.assert_equal(original != copied, True)
# Assert "destroying" the original doesn't affect the copied
original = None
npt.assert_equal(copied is not None, True)
def test_Thompson2003Model():
model = Thompson2003Model(engine='serial', xystep=5)
npt.assert_equal(model.has_space, True)
npt.assert_equal(model.has_time, False)
npt.assert_equal(hasattr(model.spatial, 'radius'), True)
# User can set `radius`:
model.radius = 123
npt.assert_equal(model.radius, 123)
npt.assert_equal(model.spatial.radius, 123)
model.build(radius=987)
npt.assert_equal(model.radius, 987)
npt.assert_equal(model.spatial.radius, 987)
# Converting ret <=> dva
npt.assert_equal(isinstance(model.retinotopy, Curcio1990Map), True)
npt.assert_almost_equal(model.retinotopy.ret2dva(0, 0), (0, 0))
npt.assert_almost_equal(model.retinotopy.dva2ret(0, 0), (0, 0))
model2 = Thompson2003Model(retinotopy=Watson2014DisplaceMap())
npt.assert_equal(isinstance(model2.retinotopy, Watson2014DisplaceMap),
True)
# Nothing in, None out:
npt.assert_equal(model.predict_percept(ArgusI()), None)
# Zero in = zero out:
implant = ArgusI(stim=np.zeros(16))
npt.assert_almost_equal(model.predict_percept(implant).data, 0)
# Multiple frames are processed independently:
model = Thompson2003Model(engine='serial', radius=1000, xystep=5,
xrange=(-20, 20), yrange=(-15, 15))
model.build()
percept = model.predict_percept(ArgusI(stim={'A1': [1, 2]}))
npt.assert_equal(percept.shape, list(model.grid.x.shape) + [2])
pmax = percept.data.max(axis=(0, 1))
npt.assert_almost_equal(percept.data[2, 3, :], pmax)
npt.assert_almost_equal(pmax[1] / pmax[0], 2.0)
npt.assert_almost_equal(percept.time, [0, 1])
def test_Thompson2003Model_predict_percept():
model = Thompson2003Model(xystep=0.55, radius=100, thresh_percept=0,
xrange=(-20, 20), yrange=(-15, 15))
model.build()
# Single-electrode stim:
img_stim = np.zeros(60)
img_stim[47] = 1
percept = model.predict_percept(ArgusII(stim=img_stim))
# Single bright pixel, very small Gaussian kernel:
npt.assert_equal(np.sum(percept.data > 0.5), 1)
npt.assert_equal(np.sum(percept.data > 0.00001), 1)
# Brightest pixel is in lower right:
npt.assert_almost_equal(percept.data[33, 46, 0], np.max(percept.data))
# Full Argus II: 60 bright spots
model = Thompson2003Model(engine='serial', xystep=0.55, radius=100)
model.build()
percept = model.predict_percept(ArgusII(stim=np.ones(60)))
npt.assert_equal(np.sum(np.isclose(percept.data, 1.0, rtol=0.1, atol=0.1)),
84)
# Model gives same outcome as Spatial:
spatial = Thompson2003Spatial(engine='serial', xystep=0.55, radius=100)
spatial.build()
spatial_percept = spatial.predict_percept(ArgusII(stim=np.ones(60)))
npt.assert_almost_equal(percept.data, spatial_percept.data)
npt.assert_equal(percept.time, None)
# Warning for nonzero electrode-retina distances
implant = ArgusI(stim=np.ones(16), z=10)
msg = ("Nonzero electrode-retina distances do not have any effect on the "
"model output.")
assert_warns_msg(UserWarning, model.predict_percept, msg, implant)
def test_deepcopy_Thompson2003Model():
original = Thompson2003Model()
copied = copy.deepcopy(original)
# Assert they are different objects
npt.assert_equal(id(original) != id(copied), True)
# Assert the objects are equivalent to each other
npt.assert_equal(original.__dict__ == copied.__dict__, True)
# Assert building one object does not affect the copied
original.build()
npt.assert_equal(copied.is_built, False)
npt.assert_equal(original != copied, True)
# Change the copied attribute by "destroying" the retinotopy attribute
# which should be unique to each SpatialModel object
copied = copy.deepcopy(original)
copied.retinotopy = None
npt.assert_equal(original.retinotopy is not None, True)
npt.assert_equal(original != copied, True)
# Assert "destroying" the original doesn't affect the copied
original = None
npt.assert_equal(copied is not None, True)
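# --- Illustrative usage sketch (not part of the original test module). ---
# Minimal end-to-end run of the model exercised above: build it, stimulate
# a single electrode, and inspect the resulting percept.
if __name__ == '__main__':
    demo_model = Thompson2003Model(xystep=1, radius=100)
    demo_model.build()
    demo_percept = demo_model.predict_percept(ArgusII(stim={'A1': 1}))
    print(demo_percept.shape)  # [rows, cols, n_frames] of the spatial grid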
|
mbeyeler/pulse2percept
|
pulse2percept/models/tests/test_thompson2003.py
|
Python
|
bsd-3-clause
| 7,099
|
[
"Gaussian"
] |
813b6854795b38f20ecbacbef174e1953d097cd3a93696649664e528865e732c
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.special import gammaln
from scipy.optimize import fmin_powell, minimize_scalar
from skbio.stats import subsample_counts
from skbio.util._decorator import experimental
def _validate(counts, suppress_cast=False):
"""Validate and convert input to an acceptable counts vector type.
Note: may not always return a copy of `counts`!
"""
counts = np.asarray(counts)
if not suppress_cast:
counts = counts.astype(int, casting='safe', copy=False)
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
elif (counts < 0).any():
raise ValueError("Counts vector cannot contain negative values.")
return counts
@experimental(as_of="0.4.0")
def berger_parker_d(counts):
r"""Calculate Berger-Parker dominance.
Berger-Parker dominance is defined as the fraction of the sample that
belongs to the most abundant OTU:
.. math::
d = \frac{N_{max}}{N}
where :math:`N_{max}` is defined as the number of individuals in the most
abundant OTU (or any of the most abundant OTUs in the case of ties), and
:math:`N` is defined as the total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Berger-Parker dominance.
Notes
-----
Berger-Parker dominance is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
.. [1] Berger & Parker (1970). SDR-IV online help.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
return counts.max() / counts.sum()
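# Worked example (illustrative, not part of the original module): with
# counts [5, 2, 1], the most abundant OTU holds 5 of N = 8 individuals:
#
#     >>> berger_parker_d([5, 2, 1])
#     0.625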
@experimental(as_of="0.4.0")
def brillouin_d(counts):
r"""Calculate Brillouin index of alpha diversity.
This is calculated as follows:
.. math::
HB = \frac{\ln N!-\sum^s_{i=1}{\ln n_i!}}{N}
where :math:`N` is defined as the total number of individuals in the
sample, :math:`s` is the number of OTUs, and :math:`n_i` is defined as the
number of individuals in the :math:`i^{\text{th}}` OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Brillouin index.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
nz = counts[counts.nonzero()]
n = nz.sum()
return (gammaln(n + 1) - gammaln(nz + 1).sum()) / n
@experimental(as_of="0.4.0")
def dominance(counts):
r"""Calculate dominance.
Dominance is defined as
.. math::
\sum{p_i^2}
where :math:`p_i` is the proportion of the entire community that OTU
:math:`i` represents.
Dominance can also be defined as 1 - Simpson's index. It ranges between
0 and 1.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Dominance.
See Also
--------
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
"""
counts = _validate(counts)
freqs = counts / counts.sum()
return (freqs * freqs).sum()
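# Worked example (illustrative, not part of the original module): two
# equally abundant OTUs give p = (0.5, 0.5), so dominance is
# 0.5**2 + 0.5**2:
#
#     >>> dominance([2, 2])
#     0.5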
@experimental(as_of="0.4.0")
def doubles(counts):
"""Calculate number of double occurrences (doubletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Doubleton count.
"""
counts = _validate(counts)
return (counts == 2).sum()
@experimental(as_of="0.4.0")
def enspie(counts):
r"""Calculate ENS_pie alpha diversity measure.
ENS_pie is equivalent to ``1 / dominance``:
.. math::
ENS_{pie} = \frac{1}{\sum_{i=1}^s{p_i^2}}
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
ENS_pie alpha diversity measure.
See Also
--------
dominance
Notes
-----
ENS_pie is defined in [1]_.
References
----------
.. [1] Chase and Knight (2013). "Scale-dependent effect sizes of ecological
drivers on biodiversity: why standardised sampling is not enough".
Ecology Letters, Volume 16, Issue Supplement s1, pgs 17-26.
"""
counts = _validate(counts)
return 1 / dominance(counts)
@experimental(as_of="0.4.0")
def equitability(counts, base=2):
"""Calculate equitability (Shannon index corrected for number of OTUs).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
base : scalar, optional
Logarithm base to use in the calculations.
Returns
-------
double
Measure of equitability.
See Also
--------
shannon
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
numerator = shannon(counts, base)
denominator = np.log(observed_otus(counts)) / np.log(base)
return numerator / denominator
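# Worked example (illustrative, not part of the original module): two
# equally abundant OTUs have H = 1 bit and log2(S) = 1, so equitability
# is maximal:
#
#     >>> equitability([2, 2])
#     1.0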
@experimental(as_of="0.4.0")
def esty_ci(counts):
r"""Calculate Esty's CI.
Esty's CI is defined as
.. math::
F_1/N \pm z\sqrt{W}
where :math:`F_1` is the number of singleton OTUs, :math:`N` is the total
number of individuals (sum of abundances for all OTUs), and :math:`z` is a
constant that depends on the targeted confidence and based on the normal
distribution.
:math:`W` is defined as
.. math::
\frac{F_1(N-F_1)+2NF_2}{N^3}
where :math:`F_2` is the number of doubleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
tuple
Esty's confidence interval as ``(lower_bound, upper_bound)``.
Notes
-----
Esty's CI is defined in [1]_. :math:`z` is hardcoded for a 95% confidence
interval.
References
----------
.. [1] Esty, W. W. (1983). "A normal limit law for a nonparametric
estimator of the coverage of a random sample". Ann Statist 11: 905-912.
"""
counts = _validate(counts)
f1 = singles(counts)
f2 = doubles(counts)
n = counts.sum()
z = 1.959963985
W = (f1 * (n - f1) + 2 * n * f2) / (n ** 3)
return f1 / n - z * np.sqrt(W), f1 / n + z * np.sqrt(W)
@experimental(as_of="0.4.0")
def fisher_alpha(counts):
r"""Calculate Fisher's alpha, a metric of diversity.
Fisher's alpha is estimated by solving the following equation for
:math:`\alpha`:
.. math::
S=\alpha\ln(1+\frac{N}{\alpha})
where :math:`S` is the number of OTUs and :math:`N` is the
total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Fisher's alpha.
Raises
------
RuntimeError
If the optimizer fails to converge (error > 1.0).
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_. Uses ``scipy.optimize.minimize_scalar`` to find
Fisher's alpha.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
n = counts.sum()
s = observed_otus(counts)
def f(alpha):
return (alpha * np.log(1 + (n / alpha)) - s) ** 2
# Temporarily silence RuntimeWarnings (invalid and division by zero) during
# optimization in case invalid input is provided to the objective function
# (e.g. alpha=0).
orig_settings = np.seterr(divide='ignore', invalid='ignore')
try:
alpha = minimize_scalar(f).x
finally:
np.seterr(**orig_settings)
if f(alpha) > 1.0:
raise RuntimeError("Optimizer failed to converge (error > 1.0), so "
"could not compute Fisher's alpha.")
return alpha
@experimental(as_of="0.4.0")
def goods_coverage(counts):
r"""Calculate Good's coverage of counts.
Good's coverage estimator is defined as
.. math::
1-\frac{F_1}{N}
where :math:`F_1` is the number of singleton OTUs and :math:`N` is the
total number of individuals (sum of abundances for all OTUs).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Good's coverage estimator.
"""
counts = _validate(counts)
f1 = singles(counts)
N = counts.sum()
return 1 - (f1 / N)
@experimental(as_of="0.4.0")
def heip_e(counts):
r"""Calculate Heip's evenness measure.
Heip's evenness is defined as:
.. math::
\frac{(e^H-1)}{(S-1)}
where :math:`H` is the Shannon-Wiener entropy of counts (using logarithm
base :math:`e`) and :math:`S` is the number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Heip's evenness measure.
See Also
--------
shannon
Notes
-----
The implementation here is based on the description in [1]_.
References
----------
.. [1] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol. Ass.
UK., 54, 555-557.
"""
counts = _validate(counts)
return ((np.exp(shannon(counts, base=np.e)) - 1) /
(observed_otus(counts) - 1))
@experimental(as_of="0.4.0")
def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
"""Calculate Kempton-Taylor Q index of alpha diversity.
Estimates the slope of the cumulative abundance curve in the interquantile
range. By default, uses lower and upper quartiles, rounding inwards.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
lower_quantile : float, optional
Lower bound of the interquantile range. Defaults to lower quartile.
upper_quantile : float, optional
Upper bound of the interquantile range. Defaults to upper quartile.
Returns
-------
double
Kempton-Taylor Q index of alpha diversity.
Notes
-----
The index is defined in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
The implementation provided here differs slightly from the results given in
Magurran 1998. Specifically, we have 14 in the numerator rather than 15.
Magurran recommends counting half of the OTUs with the same # counts as the
point where the UQ falls and the point where the LQ falls, but the
justification for this is unclear (e.g. if there were a very large # OTUs
that just overlapped one of the quantiles, the results would be
considerably off). Leaving the calculation as-is for now, but consider
changing.
References
----------
.. [1] Kempton, R. A. and Taylor, L. R. (1976) Models and statistics for
species diversity. Nature, 262, 818-820.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
n = len(counts)
lower = int(np.ceil(n * lower_quantile))
upper = int(n * upper_quantile)
sorted_counts = np.sort(counts)
return (upper - lower) / np.log(sorted_counts[upper] /
sorted_counts[lower])
@experimental(as_of="0.4.0")
def margalef(counts):
r"""Calculate Margalef's richness index.
Margalef's D is defined as:
.. math::
D = \frac{(S - 1)}{\ln N}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes log accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Margalef's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate(counts)
return (observed_otus(counts) - 1) / np.log(counts.sum())
@experimental(as_of="0.4.0")
def mcintosh_d(counts):
r"""Calculate McIntosh dominance index D.
McIntosh dominance index D is defined as:
.. math::
D = \frac{N - U}{N - \sqrt{N}}
where :math:`N` is the total number of individuals in the sample and
:math:`U` is defined as:
.. math::
U = \sqrt{\sum{{n_i}^2}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh dominance index D.
See Also
--------
mcintosh_e
Notes
-----
The index was proposed in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
References
----------
.. [1] McIntosh, R. P. 1967 An index of diversity and the relation of
certain concepts to diversity. Ecology 48, 1115-1126.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
u = np.sqrt((counts * counts).sum())
n = counts.sum()
return (n - u) / (n - np.sqrt(n))
@experimental(as_of="0.4.0")
def mcintosh_e(counts):
r"""Calculate McIntosh's evenness measure E.
McIntosh evenness measure E is defined as:
.. math::
E = \frac{\sqrt{\sum{n_i^2}}}{\sqrt{(N-S+1)^2 + S - 1}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU, :math:`N` is the total number of individuals, and :math:`S` is the
number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh evenness measure E.
See Also
--------
mcintosh_d
Notes
-----
The implementation here is based on the description given in [1]_, **NOT**
the one in the SDR-IV online manual, which is wrong.
References
----------
.. [1] Heip & Engels (1974) Comparing Species Diversity and Evenness
Indices. p 560.
"""
counts = _validate(counts)
numerator = np.sqrt((counts * counts).sum())
n = counts.sum()
s = observed_otus(counts)
denominator = np.sqrt((n - s + 1) ** 2 + s - 1)
return numerator / denominator
@experimental(as_of="0.4.0")
def menhinick(counts):
r"""Calculate Menhinick's richness index.
Menhinick's richness index is defined as:
.. math::
D_{Mn} = \frac{S}{\sqrt{N}}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes square-root accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Menhinick's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate(counts)
return observed_otus(counts) / np.sqrt(counts.sum())
@experimental(as_of="0.4.0")
def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
r"""Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
The Michaelis-Menten equation is defined as:
.. math::
S=\frac{nS_{max}}{n+B}
where :math:`n` is the number of individuals and :math:`S` is the number of
OTUs. This function estimates the :math:`S_{max}` parameter.
The fit is made to datapoints for :math:`n=1,2,...,N`, where :math:`N` is
the total number of individuals (sum of abundances for all OTUs).
:math:`S` is the number of OTUs represented in a random sample of :math:`n`
individuals.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
num_repeats : int, optional
The number of times to perform rarefaction (subsampling without
replacement) at each value of :math:`n`.
params_guess : tuple, optional
Initial guess of :math:`S_{max}` and :math:`B`. If ``None``, default
guess for :math:`S_{max}` is :math:`S` (as :math:`S_{max}` should
be >= :math:`S`) and default guess for :math:`B` is ``round(N / 2)``.
Returns
-------
S_max : double
Estimate of the :math:`S_{max}` parameter in the Michaelis-Menten
equation.
See Also
--------
skbio.stats.subsample_counts
Notes
-----
There is some controversy about how to do the fitting. The ML model given
in [1]_ is based on the assumption that error is roughly proportional to
magnitude of observation, reasonable for enzyme kinetics but not reasonable
for rarefaction data. Here we just do a nonlinear curve fit for the
parameters using least-squares.
References
----------
.. [1] Raaijmakers, J. G. W. 1987 Statistical analysis of the
Michaelis-Menten equation. Biometrics 43, 793-803.
"""
counts = _validate(counts)
n_indiv = counts.sum()
if params_guess is None:
S_max_guess = observed_otus(counts)
B_guess = int(round(n_indiv / 2))
params_guess = (S_max_guess, B_guess)
# observed # of OTUs vs # of individuals sampled, S vs n
xvals = np.arange(1, n_indiv + 1)
ymtx = np.empty((num_repeats, len(xvals)), dtype=int)
for i in range(num_repeats):
ymtx[i] = np.asarray([observed_otus(subsample_counts(counts, n))
for n in xvals], dtype=int)
yvals = ymtx.mean(0)
# Vectors of actual vals y and number of individuals n.
def errfn(p, n, y):
return (((p[0] * n / (p[1] + n)) - y) ** 2).sum()
# Return S_max.
return fmin_powell(errfn, params_guess, ftol=1e-5, args=(xvals, yvals),
disp=False)[0]
@experimental(as_of="0.4.0")
def observed_otus(counts):
"""Calculate the number of distinct OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Distinct OTU count.
"""
counts = _validate(counts)
return (counts != 0).sum()
@experimental(as_of="0.4.0")
def osd(counts):
"""Calculate observed OTUs, singles, and doubles.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
osd : tuple
Observed OTUs, singles, and doubles.
See Also
--------
observed_otus
singles
doubles
Notes
-----
This is a convenience function used by many of the other measures that rely
on these three measures.
"""
counts = _validate(counts)
return observed_otus(counts), singles(counts), doubles(counts)
@experimental(as_of="0.4.0")
def robbins(counts):
r"""Calculate Robbins' estimator for the probability of unobserved outcomes.
Robbins' estimator is defined as:
.. math::
\frac{F_1}{n+1}
where :math:`F_1` is the number of singleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Robbins' estimate.
Notes
-----
Robbins' estimator is defined in [1]_. The estimate computed here is for
:math:`n-1` counts, i.e. the x-axis is off by 1.
References
----------
.. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.
"""
counts = _validate(counts)
return singles(counts) / counts.sum()
@experimental(as_of="0.4.0")
def shannon(counts, base=2):
r"""Calculate Shannon entropy of counts, default in bits.
Shannon-Wiener diversity index is defined as:
.. math::
H = -\sum_{i=1}^s\left(p_i\log_2 p_i\right)
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
base : scalar, optional
Logarithm base to use in the calculations.
Returns
-------
double
Shannon diversity index H.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_ except that the default logarithm base used here is 2
instead of :math:`e`.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
freqs = counts / counts.sum()
nonzero_freqs = freqs[freqs.nonzero()]
return -(nonzero_freqs * np.log(nonzero_freqs)).sum() / np.log(base)
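# Worked example (illustrative, not part of the original module): two
# equally abundant OTUs carry exactly one bit of entropy:
#
#     >>> shannon([4, 4])
#     1.0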
@experimental(as_of="0.4.0")
def simpson(counts):
r"""Calculate Simpson's index.
Simpson's index is defined as ``1 - dominance``:
.. math::
1 - \sum{p_i^2}
where :math:`p_i` is the proportion of the community represented by OTU
:math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's index.
See Also
--------
dominance
Notes
-----
The implementation here is ``1 - dominance`` as described in [1]_. Other
references (such as [2]_) define Simpson's index as ``1 / dominance``.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
return 1 - dominance(counts)
@experimental(as_of="0.4.0")
def simpson_e(counts):
r"""Calculate Simpson's evenness measure E.
Simpson's E is defined as
.. math::
E=\frac{1 / D}{S_{obs}}
where :math:`D` is dominance and :math:`S_{obs}` is the number of observed
OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's evenness measure E.
See Also
--------
dominance
enspie
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html
"""
counts = _validate(counts)
return enspie(counts) / observed_otus(counts)
@experimental(as_of="0.4.0")
def singles(counts):
"""Calculate number of single occurrences (singletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Singleton count.
"""
counts = _validate(counts)
return (counts == 1).sum()
@experimental(as_of="0.4.0")
def strong(counts):
r"""Calculate Strong's dominance index.
Strong's dominance index is defined as:
.. math::
D_w = max_i[(\frac{b_i}{N})-\frac{i}{S}]
where :math:`b_i` is the sequential cumulative totaling of the
:math:`i^{\text{th}}` OTU abundance values ranked from largest to smallest,
:math:`N` is the total number of individuals in the sample, and
:math:`S` is the number of OTUs in the sample. The expression in brackets
is computed for all OTUs, and :math:`max_i` denotes the maximum value in
brackets for any OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Strong's dominance index (Dw).
Notes
-----
Strong's dominance index is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
.. [1] Strong, W. L., 2002 Assessing species abundance unevenness within and
between plant communities. Community Ecology, 3, 237-246.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate(counts)
n = counts.sum()
s = observed_otus(counts)
i = np.arange(1, len(counts) + 1)
sorted_sum = np.sort(counts)[::-1].cumsum()
return (sorted_sum / n - (i / s)).max()
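# Worked example (illustrative, not part of the original module): for
# counts [5, 3, 1, 1], N = 10 and S = 4, the descending cumulative sums
# are (5, 8, 9, 10), and the bracketed expression peaks at i = 2 with
# 8/10 - 2/4:
#
#     >>> strong([5, 3, 1, 1])
#     0.30000000000000004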
|
xguse/scikit-bio
|
skbio/diversity/alpha/_base.py
|
Python
|
bsd-3-clause
| 25,011
|
[
"scikit-bio"
] |
2b33c45b447759125ee51826fe412faa746db48398c8e532e8598ec5d768c21b
|
# -*- coding: utf-8 -*-
"""
Wind vector calculations in finite differences
"""
import numpy as np
from . import utils
grids = ('cartesian','lonlat') #,'gaussian')
# TODO: laplacian, see "High Performance Python", p. 118
#
class Wind3D(object):
def __init__(self, u, v, w, x=1., y=1., z=1., lats=45., hgridtype=grids[0]):
"""
Initialize a Wind3D instance
"""
if (u.shape != v.shape) or (u.shape != w.shape):
raise ValueError('u, v and w must be the same shape')
#if len(u.shape) not in (2, 3):
# raise ValueError('u and v must be rank 2, or 3 arrays')
self.u = u.copy()
self.v = v.copy()
self.w = w.copy()
if isinstance(lats, np.ndarray):
assert lats.shape == self.u.shape[:2], 'Reshape lats!'
self.lats = lats
self.hgridtype = hgridtype.lower()
if self.hgridtype not in grids:
raise ValueError('invalid grid type: {0:s}'.format(repr(hgridtype)))
if self.hgridtype == 'lonlat':
if type(x) is np.ndarray and type(y) is np.ndarray:
if u.shape[:2] != x.shape or u.shape[:2] != y.shape:
if len(x.shape) == len(y.shape) == 1:
self.x, self.y = np.meshgrid(x, y)
self.x, self.y = self.x.T, self.y.T
self.__lonlat2dist()
if u.shape[:2] != self.x.shape or u.shape[:2] != self.y.shape:
raise ValueError('Incorrect shape of coordinate arrays')
else:
raise ValueError('Incorrect shape of coordinate arrays')
else:
self.x = x
self.y = y
self.__lonlat2dist()
else:
self.x = x
self.y = y
else:
self.x = x
self.y = y
if isinstance(z, np.ndarray):
if z.shape[0] == u.shape[-1]:
# Assuming z is an array of vertical levels
self.z = z.reshape((1,)*2+z.shape)
else:
msg = 'z.shape={0} with u.shape={1} is not allowed'.format(z.shape, u.shape)
raise ValueError(msg)
else:
# Assuming z is a scalar
self.z = z
def __lonlat2dist(self):
"""
Converting input lon-lat arrays to distance arrays
"""
self.x = utils.lon2dist(self.x, self.y)
self.y = utils.lat2dist(self.y)
def __assert_vort(self):
if not hasattr(self, 'vo'): self.vo = self.vort_z()
def vort_z(self):
"""
Relative vorticity (z-component of curl)
r$\frac{\partial v}{\partial x} - \frac{\partial u}{\partial y}$
"""
f = dfdx(self.v, self.x, 0) - dfdx(self.u, self.y, 1)
return f
def hdiv(self):
"""
Horizontal divergence
r$\nabla_p\cdot\vec v$
"""
f = dfdx(self.u, self.x, 0) + dfdx(self.v, self.y, 1)
return f
def kvn(self):
    """
    Kinematic vorticity number: vorticity normalised by the total
    deformation (divergence, shearing and stretching terms)
    """
    numerator = self.vort_z()
    dfm_stretch = dfdx(self.u, self.x, 0) - dfdx(self.v, self.y, 1)
    dfm_shear = dfdx(self.u, self.y, 1) + dfdx(self.v, self.x, 0)
    denominator = (self.hdiv()**2 + dfm_shear**2 + dfm_stretch**2)**0.5
    return numerator/denominator
# def _udvodx(self):
# self.__assert_vort()
# f = self.u*dfdx(self.vo, self.x, 0)
# return f
#
# def _vdvody(self):
# self.__assert_vort()
# f = self.v*dfdx(self.vo, self.y, 1)
# return f
#
# def _duvodx(self):
# self.__assert_vort()
# f = dfdx(self.u*self.vo, self.x, 0)
# return f
#
# def _dvvody(self):
# self.__assert_vort()
# f = dfdx(self.v*self.vo, self.y, 1)
# return f
def vort_tend_hadv(self):
"""
Horizontal advection of relative vorticity
r$\vec v\cdot \nabla_p \zeta$
"""
self.__assert_vort()
f = self.u*dfdx(self.vo, self.x, 0) + \
self.v*dfdx(self.vo, self.y, 1)
f = -f
return f
def vort_tend_hadv_flux(self):
"""
Horizontal advection in flux form
r$\nabla_p \cdot (\zeta\vec v)$
"""
self.__assert_vort()
f = dfdx(self.u*self.vo, self.x, 0) + \
dfdx(self.v*self.vo, self.y, 1)
f = -f
return f
def vort_tend_vadv(self):
"""
Vertical advection of relative vorticity
r$\omega \frac{\partial \zeta}{\partial p}$
"""
self.__assert_vort()
f = - self.w*dfdx(self.vo, self.z, 2)
return f
def planet_vort_adv(self):
"""
Planetary vorticity advection
r$\beta v$
"""
fcor = utils.calc_fcor(self.lats)
beta = dfdx(fcor,self.y,1)
beta = beta.repeat(self.v.shape[-1]).reshape(self.v.shape)
f = - self.v*beta
return f
def vort_tend_stretch(self):
"""
Stretching term
r$\nabla_p\cdot\vec v (\zeta+f)$
"""
self.__assert_vort()
div = self.hdiv()
fcor = utils.calc_fcor(self.lats)
fcor = fcor.repeat(self.vo.shape[-1]).reshape(self.vo.shape)
f = - div*(self.vo + fcor)
return f
def vort_tend_div_fcor(self):
"""
Product of divergence and Coriolis parameter
r$f\nabla_p\cdot\vec v$
"""
div = self.hdiv()
fcor = utils.calc_fcor(self.lats)
fcor = fcor.repeat(div.shape[-1]).reshape(div.shape)
f = - div*fcor
return f
def vort_tend_twist(self):
"""
Tilting/twisting term
r$\vec k \cdot \nabla\omega\times\frac{\partial\vec v}{\partial p}$
"""
dwdx = dfdx(self.w, self.x, 0)
dwdy = dfdx(self.w, self.y, 1)
dudp = dfdx(self.u, self.z, 2)
dvdp = dfdx(self.v, self.z, 2)
f = - (dwdx*dvdp - dwdy*dudp)
return f
def vort_tend_rhs(self, form='standard'):
"""
Right-hand side of vorticity equation in pressure coordinates
Kwargs:
-------
form: str, standard | flux
standard : standard form
flux : horizontal advection in flux form, stretching term -> div*fcor
Returns:
--------
sequence of terms of `numpy.ndarray` type
Reference: Bluestein, 1992 Vol I, sect. 4.5.4
"""
if form.lower() == 'standard':
self.vo = self.vort_z()
hadv = self.vort_tend_hadv()
vadv = self.vort_tend_vadv()
planet_vort_adv = self.planet_vort_adv()
stretch = self.vort_tend_stretch()
twist = self.vort_tend_twist()
elif form.lower() == 'flux':
self.vo = self.vort_z()
hadv = self.vort_tend_hadv_flux()
vadv = self.vort_tend_vadv()
planet_vort_adv = self.planet_vort_adv()
stretch = self.vort_tend_div_fcor()
twist = self.vort_tend_twist()
else:
raise ValueError('`form` keyword should be either "standard" or "flux"')
return hadv, vadv, planet_vort_adv, stretch, twist
class WindHorizontal(object):
def __init__(self, u, v, x=1., y=1., hgridtype=grids[0]):
"""
Initialize a WindHorizontal instance
"""
if u.shape != v.shape:
raise ValueError('u and v must be the same shape')
if len(u.shape) not in (2, 3):
raise ValueError('u and v must be rank 2, or 3 arrays')
self.u = u.copy()
self.v = v.copy()
if len(u.shape) > 2:
self.nd = u.shape[2]
else:
self.nd = 0
self.hgridtype = hgridtype.lower()
if self.hgridtype not in grids:
raise ValueError('invalid grid type: {0:s}'.format(repr(hgridtype)))
if self.hgridtype == 'lonlat':
if type(x) is np.ndarray and type(y) is np.ndarray:
if u.shape[:2] != x.shape or u.shape[:2] != y.shape:
if len(x.shape) == len(y.shape) == 1:
self.x, self.y = np.meshgrid(x, y)
self.x, self.y = self.x.T, self.y.T
self.__lonlat2dist()
if u.shape[:2] != self.x.shape or u.shape[:2] != self.y.shape:
raise ValueError('Incorrect shape of coordinate arrays')
else:
raise ValueError('Incorrect shape of coordinate arrays')
else:
self.x = x
self.y = y
self.__lonlat2dist()
else:
self.x = x
self.y = y
else:
self.x = x
self.y = y
def __lonlat2dist(self):
"""
Converting input lon-lat arrays to distance arrays
"""
self.x = utils.lon2dist(self.x, self.y)
self.y = utils.lat2dist(self.y)
def magnitude(self):
"""
Calculate wind speed (magnitude of wind vector)
"""
return np.sqrt(self.u**2 + self.v**2)
def winddir_meteo(self, outfmt='numeric', nbins=16):
"""
Calculate wind direction according to meteorological convention
"""
f = 180.+180./np.pi*np.arctan2(self.u, self.v)
if outfmt == 'name':
f = utils.deg2name(f, nbins)
return f
def vort_z(self):
"""
Relative vorticity (z-component of curl)
"""
f = dfdx(self.v, self.x, 0) - dfdx(self.u, self.y, 1)
# if self.nd > 0:
# f = np.zeros(self.u.shape, dtype=self.u.dtype)
# for i in range(self.nd):
# f[:,:,i] = dfdx(self.v[:,:,i], self.x, 0) - dfdx(self.u[:,:,i], self.y, 1)
# else:
# f = dfdx(self.v, self.x, 0) - dfdx(self.u, self.y, 1)
return f
def dfdx(f, x, axis=0):
    """
    Generic first derivative of `f` along `axis` using ``numpy.gradient``,
    with either a scalar grid spacing or a coordinate array `x`
    """
    df = np.gradient(f)[axis]
    if isinstance(x, np.ndarray):
        # TODO: check the handling of rank-3 fields with rank-2 coordinate
        # arrays before computing dx
        if x.shape == df.shape:
            dx = np.gradient(x)[axis]
        elif x.shape[-1] == df.shape[-1]:
            dx = np.gradient(x.squeeze())
            dx = dx.reshape(x.shape)
    else:
        dx = x
    return df/dx
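# Worked example (illustrative, not part of the original module): on a 2-D
# linear ramp the finite differences are exact, so with a scalar grid
# spacing of 2.0 the derivative along axis 0 is constant:
#
#     >>> f2d = np.arange(12.).reshape(4, 3)  # increases by 3 along axis 0
#     >>> dfdx(f2d, 2., axis=0)[0, 0]
#     1.5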
class ScalarHorizontal(object):
def __init__(self, s, x=1., y=1., hgridtype=grids[0]):
"""
Initialize a ScalarHorizontal instance
"""
if len(s.shape) not in (2, 3):
raise ValueError('Scalar s must be rank 2, or 3 arrays')
self.s = s.copy()
if len(s.shape) > 2:
self.nd = s.shape[2]
else:
self.nd = 0
self.hgridtype = hgridtype.lower()
if self.hgridtype not in grids:
raise ValueError('invalid grid type: {0:s}'.format(repr(hgridtype)))
if self.hgridtype == 'lonlat':
if type(x) is np.ndarray and type(y) is np.ndarray:
if s.shape[:2] != x.shape or s.shape[:2] != y.shape:
if len(x.shape) == len(y.shape) == 1:
self.x, self.y = np.meshgrid(x, y)
self.x, self.y = self.x.T, self.y.T
self.__lonlat2dist()
if s.shape[:2] != self.x.shape or s.shape[:2] != self.y.shape:
raise ValueError('Incorrect shape of coordinate arrays')
else:
raise ValueError('Incorrect shape of coordinate arrays')
else:
self.x = x
self.y = y
self.__lonlat2dist()
else:
self.x = x
self.y = y
else:
self.x = x
self.y = y
def __lonlat2dist(self):
"""
Converting input lon-lat arrays to distance arrays
"""
self.x = utils.lon2dist(self.x, self.y)
self.y = utils.lat2dist(self.y)
def gradient(self):
"""
Calculate horizontal components of gradient
"""
fx = dfdx(self.s, self.x, 0)
fy = dfdx(self.s, self.y, 1)
# if self.nd > 0:
# fx = np.zeros(self.s.shape, dtype=self.s.dtype)
# fy = np.zeros(self.s.shape, dtype=self.s.dtype)
# for i in range(self.nd):
# fx[:,:,i] = dfdx(self.s[:,:,i], self.x, 0)
# fy[:,:,i] = dfdx(self.s[:,:,i], self.y, 1)
# else:
# fx = dfdx(self.s, self.x, 0)
# fy = dfdx(self.s, self.y, 1)
return fx, fy
def gradient_mag(self):
"""
Calculate magnitude of horizontal gradient
"""
fx, fy = self.gradient()
f = np.sqrt(fx**2 + fy**2)
return f
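# --- Illustrative usage sketch (not part of the original module). ---
# Solid-body rotation (u, v) = (-y, x) on a Cartesian grid has constant
# relative vorticity dv/dx - du/dy = 2; fields here are indexed [x, y],
# so the x coordinate varies along axis 0.
if __name__ == '__main__':
    xx, yy = np.mgrid[0:10, 0:10].astype(float)
    demo_wind = WindHorizontal(-yy, xx, x=1., y=1.)
    print(demo_wind.vort_z().mean())  # 2.0, exact even at the array edges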
|
dennissergeev/pyveccalc
|
pyveccalc/standard.py
|
Python
|
mit
| 12,996
|
[
"Gaussian"
] |
528932a42d97c63ebec29a968215277d64dcb1a525b6693a6beb36c7a12a8828
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import itertools
import numpy as np
import mdtraj as md
from mdtraj.testing import eq, get_fn, assert_allclose
from mdtraj.geometry.distance import compute_distances, compute_displacements, find_closest_contact
from mdtraj.geometry.distance import _displacement_mic, _displacement
N_FRAMES = 20
N_ATOMS = 20
xyz = np.asarray(np.random.randn(N_FRAMES, N_ATOMS, 3), dtype=np.float32)
pairs = np.array(list(itertools.combinations(range(N_ATOMS), 2)), dtype=np.int32)
ptraj = md.Trajectory(xyz=xyz, topology=None)
ptraj.unitcell_vectors = np.ascontiguousarray(np.random.randn(N_FRAMES, 3, 3) + 2*np.eye(3,3), dtype=np.float32)
def test_generator():
pairs2 = itertools.combinations(range(N_ATOMS), 2)
a = compute_distances(ptraj, pairs)
b = compute_distances(ptraj, pairs2)
eq(a, b)
def test_0():
a = compute_distances(ptraj, pairs, periodic=False, opt=True)
b = compute_distances(ptraj, pairs, periodic=False, opt=False)
eq(a, b)
def test_1():
a = compute_displacements(ptraj, pairs, periodic=False, opt=True)
b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
eq(a, b)
def test_2():
a = compute_distances(ptraj, pairs, periodic=False, opt=False)
b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_3():
a = compute_distances(ptraj, pairs, periodic=False, opt=True)
b = compute_displacements(ptraj, pairs, periodic=False, opt=True)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_0p():
a = compute_distances(ptraj, pairs, periodic=True, opt=True)
b = compute_distances(ptraj, pairs, periodic=True, opt=False)
eq(a, b, decimal=3)
def test_1p():
a = compute_displacements(ptraj, pairs, periodic=True, opt=True)
b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
eq(a, b, decimal=3)
def test_2p():
a = compute_distances(ptraj, pairs, periodic=True, opt=False)
b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
assert a.shape == (len(ptraj), len(pairs))
assert b.shape == (len(ptraj), len(pairs), 3), str(b.shape)
b = np.sqrt(np.sum(np.square(b), axis=2))
eq(a, b, decimal=5)
def test_3p():
a = compute_distances(ptraj, pairs, periodic=True, opt=True)
b = compute_displacements(ptraj, pairs, periodic=True, opt=True)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_4():
# using a really big box, we should get the same results with and without
# pbcs
box = np.array([[100, 0, 0], [0, 200, 0], [0, 0, 300]])
box = np.zeros((N_FRAMES, 3, 3)) + box #broadcast it out
a = _displacement_mic(xyz, pairs, box, False)
b = _displacement(xyz, pairs)
eq(a, b, decimal=3)
def test_5():
# simple wrap around along the z axis.
xyz = np.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 2.2]]])
box = np.eye(3,3).reshape(1,3,3)
result = _displacement_mic(xyz, np.array([[0,1]]), box, True)
eq(result, np.array([[[0, 0, 0.2]]]))
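def test_5b():
    # Illustrative companion to test_5 (a sketch, not in the original
    # suite): along one axis of an orthorhombic box of length L, the
    # minimum-image displacement reduces to dz - L * round(dz / L).
    dz, box_length = 2.2, 1.0
    wrapped = dz - box_length * np.round(dz / box_length)
    assert abs(wrapped - 0.2) < 1e-12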
def test_6():
ext_ref = np.array([17.4835, 22.2418, 24.2910, 22.5505, 12.8686, 22.1090,
7.4472, 22.4253, 19.8283, 20.6935]) / 10
_run_amber_traj('test_good.nc', ext_ref)
def test_7():
ext_ref = np.array([30.9184, 23.9040, 25.3869, 28.0060, 25.9704, 24.6836,
23.0508, 27.1983, 24.4954, 26.7448]) / 10
_run_amber_traj('test_bad.nc', ext_ref)
def _run_amber_traj(trajname, ext_ref):
# Test triclinic case where simple approach in Tuckerman text does not
# always work
traj = md.load(get_fn(trajname), top=get_fn('test.parm7'))
distopt = md.compute_distances(traj, [[0, 9999]], opt=True)
distslw = md.compute_distances(traj, [[0, 9999]], opt=False)
dispopt = md.compute_displacements(traj, [[0, 9999]], opt=True)
dispslw = md.compute_displacements(traj, [[0, 9999]], opt=False)
eq(distopt, distslw, decimal=5)
eq(dispopt, dispslw, decimal=5)
assert_allclose(distopt.flatten(), ext_ref, atol=2e-5)
# Make sure distances from displacements are the same
eq(np.sqrt((dispopt.squeeze()**2).sum(axis=1)), distopt.squeeze())
eq(np.sqrt((dispslw.squeeze()**2).sum(axis=1)), distslw.squeeze())
eq(dispopt, dispslw, decimal=5)
def test_closest_contact():
box_size = np.array([3.0, 4.0, 5.0])
traj = md.Trajectory(xyz=xyz*box_size, topology=None)
_verify_closest_contact(traj)
traj.unitcell_lengths = np.array([box_size for i in range(N_FRAMES)])
traj.unitcell_angles = np.array([[90.0, 90.0, 90.0] for i in range(N_FRAMES)])
_verify_closest_contact(traj)
traj.unitcell_angles = np.array([[80.0, 90.0, 100.0] for i in range(N_FRAMES)])
_verify_closest_contact(traj)
def _verify_closest_contact(traj):
group1 = np.array([i for i in range(N_ATOMS//2)], dtype=int)
group2 = np.array([i for i in range(N_ATOMS//2, N_ATOMS)], dtype=int)
contact = find_closest_contact(traj, group1, group2)
pairs = np.array([(i, j) for i in group1 for j in group2], dtype=int)
dists = md.compute_distances(traj, pairs, True)[0]
dists2 = md.compute_distances(traj, pairs, False)[0]
nearest = np.argmin(dists)
eq(float(dists[nearest]), contact[2], decimal=5)
assert((pairs[nearest,0] == contact[0] and pairs[nearest,1] == contact[1]) or (pairs[nearest,0] == contact[1] and pairs[nearest,1] == contact[0]))
|
ctk3b/mdtraj
|
mdtraj/geometry/tests/test_distance.py
|
Python
|
lgpl-2.1
| 6,414
|
[
"MDTraj"
] |
25fdee5f983f34d719f8c528ccd87415a33e8a13e2f62f40cefaccc0f852d23a
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualization sample for bonds. Simulates a large chain of particles connected
via harmonic bonds.
"""
import espressomd
from espressomd.interactions import HarmonicBond
from espressomd import visualization
import numpy as np
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
box_l = 50
n_part = 200
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=0.1, gamma=20.0, seed=42)
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=0, sigma=1,
cutoff=2, shift="auto")
system.bonded_inter[0] = HarmonicBond(k=0.5, r_0=1.0)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
for i in range(n_part - 1):
system.part[i].add_bond((system.bonded_inter[0], system.part[i + 1].id))
#visualizer = visualization.mayaviLive(system)
visualizer = visualization.openGLLive(system, bond_type_radius=[0.3])
system.minimize_energy.init(
f_max=10, gamma=50.0, max_steps=1000, max_displacement=0.2)
system.minimize_energy.minimize()
visualizer.run(1)
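# Alternative driving pattern (an illustrative sketch modelled on other
# ESPResSo visualization samples; not part of this script): integrate in a
# background thread and let the visualizer own the main loop.
#
#     from threading import Thread
#
#     def loop():
#         while True:
#             system.integrator.run(1)
#             visualizer.update()
#
#     Thread(target=loop, daemon=True).start()
#     visualizer.start()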
|
mkuron/espresso
|
samples/visualization_bonded.py
|
Python
|
gpl-3.0
| 1,999
|
[
"ESPResSo"
] |
2312c7bcd2e61cacd2d01525bbb69850ddc3a1270055703c6af6ccf276afe320
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests tabulated bonded and non-bonded interactions
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
@ut.skipIf(not espressomd.has_features("TABULATED"), "Skipped because feature is disabled")
class TabulatedTest(ut.TestCase):
s = espressomd.System(box_l=[1.0, 1.0, 1.0])
s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
s.box_l = 3 * [10]
s.time_step = 0.01
s.cell_system.skin = 0.4
def setUp(self):
self.force = np.zeros((100,))
self.energy = np.zeros((100,))
self.min_ = 1.
self.max_ = 2.
self.dx = (self.max_ - self.min_) / 99.
for i in range(0, 100):
self.force[i] = 5 + i * 2.3 * self.dx
self.energy[i] = 5 - i * 2.3 * self.dx
self.s.part.clear()
self.s.part.add(id=0, type=0, pos=[5., 5., 5.0])
self.s.part.add(id=1, type=0, pos=[5., 5., 5.5])
def check(self):
# Below cutoff
np.testing.assert_allclose(np.copy(self.s.part[:].f), 0.0)
for z in np.linspace(0, self.max_ - self.min_, 200, endpoint=False):
self.s.part[1].pos = [5., 5., 6. + z]
self.s.integrator.run(0)
np.testing.assert_allclose(
np.copy(self.s.part[0].f), [0., 0., 5. + z * 2.3])
np.testing.assert_allclose(
np.copy(self.s.part[0].f), -np.copy(self.s.part[1].f))
self.assertAlmostEqual(
self.s.analysis.energy()['total'], 5. - z * 2.3)
def test_non_bonded(self):
self.s.non_bonded_inter[0, 0].tabulated.set_params(
min=self.min_, max=self.max_, energy=self.energy, force=self.force)
np.testing.assert_allclose(
self.force, self.s.non_bonded_inter[0, 0].tabulated.get_params()['force'])
np.testing.assert_allclose(
self.energy, self.s.non_bonded_inter[0, 0].tabulated.get_params()['energy'])
self.assertAlmostEqual(
self.min_, self.s.non_bonded_inter[0, 0].tabulated.get_params()['min'])
self.assertAlmostEqual(
self.max_, self.s.non_bonded_inter[0, 0].tabulated.get_params()['max'])
self.check()
self.s.non_bonded_inter[0, 0].tabulated.set_params(
min=-1, max=-1, energy=[], force=[])
def test_bonded(self):
from espressomd.interactions import Tabulated
tb = Tabulated(
type='distance', min=self.min_, max=self.max_, energy=self.energy,
force=self.force)
self.s.bonded_inter.add(tb)
np.testing.assert_allclose(self.force, tb.params['force'])
np.testing.assert_allclose(self.energy, tb.params['energy'])
self.assertAlmostEqual(self.min_, tb.params['min'])
self.assertAlmostEqual(self.max_, tb.params['max'])
self.s.part[0].add_bond((tb, 1))
self.check()
self.s.part[0].delete_bond((tb, 1))
if __name__ == "__main__":
ut.main()
|
hmenke/espresso
|
testsuite/python/tabulated.py
|
Python
|
gpl-3.0
| 3,708
|
[
"ESPResSo"
] |
76893e2b92ec7274e49c00969417c090557e3e31071301c2aea5d8b44803e402
|
"""The Mayavi engine. This class manages the Mayavi objects at the
highest level.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
# VTK is used to just shut off the warnings temporarily.
try:
import vtk
except ImportError as m:
m.args = ('%s\n%s\nDo you have vtk and its Python bindings installed properly?' %
(m.args[0], '_'*80),)
raise
# Enthought library imports.
from traits.api import (HasStrictTraits, List, Str,
Property, Instance, Event, HasTraits, Callable, Dict,
Bool, on_trait_change, WeakRef)
from traitsui.api import View, Item
from apptools.persistence import state_pickler
from apptools.scripting.api import Recorder, recordable
# Local imports.
from tvtk.common import is_old_pipeline
from mayavi.core.base import Base
from mayavi.core.scene import Scene
from mayavi.core.common import error, process_ui_events
from mayavi.core.registry import registry
from mayavi.core.adder_node import AdderNode, SceneAdderNode
from mayavi.preferences.api import preference_manager
from mayavi.core.ui.mayavi_scene import viewer_factory
######################################################################
# Utility functions.
######################################################################
def _id_generator():
"""Returns a sequence of numbers for the title of the scene
window."""
n = 1
while True:
yield n
n += 1
scene_id_generator = _id_generator()
def get_args(function):
""" Simple inspect-like function to inspect the arguments a function
takes.
"""
return function.__code__.co_varnames[:function.__code__.co_argcount]
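# Quick illustration (not part of the original module): `get_args` reads
# the declared argument names straight off the code object.
#
#     >>> get_args(lambda scene, obj=None: None)
#     ('scene', 'obj')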
######################################################################
# `Engine` class
######################################################################
class Engine(HasStrictTraits):
""" The Mayavi engine base class.
"""
# The version of this class. Used for persistence.
__version__ = 0
# The scenes associated with this project.
scenes = List(Scene, record=True)
# The list to provide to a TreeEditor. Always add on a AdderNode.
# TODO: It makes more sense to put the modification of the list
# in some other UI module, and not here.
children_ui_list = Property(record=False)
# Our name.
name = Str('Mayavi Engine')
# Current scene.
current_scene = Property(Instance(Scene), record=False)
# Current object.
current_object = Property(record=False)
# Current selection -- the currently selected object on the tree.
current_selection = Property(record=False)
# Has the Engine started? Use this event to do something after
# the engine has been started.
started = Event(record=False)
# An optional callable that will generate a usable new viewer
# containing a `tvtk.pyface.TVTKScene` instance. Ideally the viewer
# should have an interface like `tvtk.pyface.TVTKWindow` -- basically
# it should implement the `closing` and `activated` events, although
# this is not strictly necessary. The callable is used by the
# `new_scene` method to create a new viewer. This is a mechanism to
# use a user-specified scene with the Engine and still be able to
# load saved visualizations using the new scene. Handy for things
# like off-screen rendering.
scene_factory = Callable(viewer_factory)
# Are we running?
running = Bool(False, record=False)
# This event is invoked when the engine has been stopped.
closed = Event()
# The recorder for script recording.
recorder = Instance(Recorder, record=False)
########################################
# Private traits.
_current_scene = WeakRef(Scene, allow_none=True)
_current_object = WeakRef(HasTraits, allow_none=True)
_current_selection = WeakRef(HasTraits, allow_none=True)
_viewer_ref = Dict
# View related traits.
current_selection_view = View(Item(name='_current_selection',
enabled_when='_current_selection is not None',
style='custom', springy=True,
show_label=False,),
resizable=True,
scrollable=True
)
######################################################################
# `object` interface
######################################################################
def __init__(self, **traits):
super(Engine, self).__init__(**traits)
# FIXME: This is tied to preferences. It really should not be;
# we need to use bind_preferences here.
# To remove ref cycle with root preferences helper, the trait change
# handler is an instance method
preference_manager.root.on_trait_change(self._show_helper_nodes_changed,
'show_helper_nodes')
def __get_pure_state__(self):
d = self.__dict__.copy()
for x in ['_current_scene', '_current_object',
'__sync_trait__', '_viewer_ref',
'__traits_listener__']:
d.pop(x, None)
return d
def __set_pure_state__(self, state):
# Current number of scenes.
n_scene = len(self.scenes)
# Number of scenes in saved state.
n_saved_scene = len(state.scenes)
# Remove extra ones.
for i in range(n_scene - n_saved_scene):
self.close_scene(self.scenes[-1])
# Add new ones.
for i in range(n_saved_scene - n_scene):
self.new_scene()
# Set the state.
state_pickler.set_state(self, state)
def __getstate__(self):
return state_pickler.dumps(self)
def __setstate__(self, str_state):
self.__init__()
state = state_pickler.loads_state(str_state)
state_pickler.update_state(state)
self.__set_pure_state__(state)
######################################################################
# `Engine` interface
######################################################################
def start(self):
"""This is called by the plugin when the plugin actually
starts."""
registry.register_engine(self)
# Notify any listeners that the engine is started.
self.started = self
self.running = True
def stop(self):
registry.unregister_engine(self)
self.running = False
self.closed = True
@recordable
def add_source(self, src, scene=None):
"""Adds a source to the pipeline. Uses the current scene unless a
scene is given in the scene keyword argument."""
passed_scene = scene
if scene is not None:
tvtk_scene = scene.scene
for sc in self.scenes:
if sc.scene == tvtk_scene:
scene = sc
break
else:
error('This scene is not managed by mayavi')
return
else:
scene = self.current_scene
# Create a new scene if none is available.
if scene is None:
self.new_scene()
scene = self.current_scene
scene.add_child(src)
self.current_object = src
@recordable
def add_filter(self, fil, obj=None):
"""Adds a filter to the pipeline at an appropriate point. Adds it
to the selected object, or to an object passed as the
kwarg `obj`.
"""
passed_obj = obj
if obj is None:
obj = self.current_object
if not isinstance(obj, Base):
msg = 'No valid current object, '\
'please select an active object.'
error(msg)
return
if (obj is not None) and (not isinstance(obj, Scene)):
if obj.running:
obj.add_child(fil)
self.current_object = fil
else:
msg = 'Current object is not active, '\
'please select an active object.'
error(msg)
else:
if obj is None:
error('Please create a VTK scene and open some data first.')
else:
error('No data: cannot use a Filter/Module/ModuleManager.')
@recordable
def add_module(self, mod, obj=None):
"""Adds a module to the pipeline at an appropriate point. Adds it
to the selected object, or to an object passed through the
kwarg `obj`.
"""
self.add_filter(mod, obj=obj)
@recordable
def save_visualization(self, file_or_fname):
"""Given a file or a file name, this saves the current
visualization to the file.
"""
# Save the state of VTK's global warning display.
o = vtk.vtkObject
w = o.GetGlobalWarningDisplay()
o.SetGlobalWarningDisplay(0) # Turn it off.
try:
#FIXME: This is for streamline seed point widget position which
#does not get serialized correctly
if is_old_pipeline():
state_pickler.dump(self, file_or_fname)
else:
state = state_pickler.get_state(self)
st = state.scenes[0].children[0].children[0].children[4]
l_pos = st.seed.widget.position
st.seed.widget.position = [pos.item() for pos in l_pos]
saved_state = state_pickler.dumps(state)
file_or_fname.write(saved_state)
except (IndexError, AttributeError):
state_pickler.dump(self, file_or_fname)
finally:
# Reset the warning state.
o.SetGlobalWarningDisplay(w)
@recordable
def load_visualization(self, file_or_fname):
"""Given a file/file name this loads the visualization."""
# Save the state of VTK's global warning display.
o = vtk.vtkObject
w = o.GetGlobalWarningDisplay()
o.SetGlobalWarningDisplay(0) # Turn it off.
try:
# Get the state from the file.
state = state_pickler.load_state(file_or_fname)
state_pickler.update_state(state)
# Add the new scenes.
for scene_state in state.scenes:
self.new_scene()
scene = self.scenes[-1]
# Disable rendering initially.
if scene.scene is not None:
scene.scene.disable_render = True
# Update the state.
state_pickler.update_state(scene_state)
scene.__set_pure_state__(scene_state)
# Setting the state will automatically reset the
# disable_render.
scene.render()
finally:
# Reset the warning state.
o.SetGlobalWarningDisplay(w)
@recordable
def open(self, filename, scene=None):
"""Open a file given a filename if possible in either the
current scene or the passed `scene`.
"""
passed_scene = scene
reader = registry.get_file_reader(filename)
if reader is None:
msg = 'No suitable reader found for the file %s'%filename
error(msg)
else:
src = None
if scene is None:
scene = self.current_scene
if scene is None:
scene = self.new_scene()
try:
sc = scene.scene
if sc is not None:
sc.busy = True
callable = reader.get_callable()
if reader.factory is None:
src = callable()
src.initialize(filename)
else:
# Factory functions are passed the filename and a
# reference to the engine.
src = callable(filename, self)
if src is not None:
self.add_source(src, passed_scene)
finally:
if sc is not None:
sc.busy = False
if src is not None:
return src
def record(self, msg):
"""This is merely a convenience method to record messages to the
script recorder.
"""
r = self.recorder
if r is not None:
r.record(msg)
######################################################################
# Scene creation/deletion related methods.
######################################################################
def add_scene(self, scene, name=None):
"""Add given `scene` (a `pyface.tvtk.scene.Scene` instance) to
the mayavi engine so that mayavi can manage the scene. This
is used when the user creates a scene. Note that for the
`EnvisageEngine` this is automatically taken care of when you
create a new scene using the TVTK scene plugin.
Parameters:
-----------
scene - `pyface.tvtk.scene.Scene`
The scene that needs to be managed from mayavi.
name - `str`
The name assigned to the scene. It tries to determine the
name of the scene from the passed scene instance. If this
is not possible it defaults to 'Mayavi Scene'.
"""
if name is None:
if hasattr(scene, 'name'):
name = scene.name
else:
name = 'Mayavi Scene %d'%next(scene_id_generator)
s = Scene(scene=scene, name=name, parent=self)
s.start()
# We don't want the startup setup to be recorded.
recorder = self.recorder
self.scenes.append(s)
self.current_scene = s
if recorder is not None:
recorder.register(s)
@recordable
def remove_scene(self, scene, **kwargs):
"""Remove a given `scene` (a `pyface.tvtk.scene.Scene`
instance) from the mayavi engine if it is already being
managed by mayavi. Note that for the `EnvisageEngine` this is
automatically taken care of when you close a scene started
using the TVTK scene plugin.
Parameters:
-----------
scene - `pyface.tvtk.scene.Scene`
The scene that needs to be removed from mayavi.
"""
s = None
for index, x in enumerate(self.scenes):
if x.scene is scene:
s = x
break
if s is not None:
s.stop()
self.scenes.remove(s)
# Don't record it shutting down. To do this we must
# unregister it here so we don't record unnecessary calls.
recorder = self.recorder
if recorder is not None:
recorder.unregister(s)
# Remove the reference to the viewer if any.
if scene in self._viewer_ref:
del self._viewer_ref[scene]
# Clear the current scene if it has been removed.
if scene is self._current_scene:
self._current_scene = None
@recordable
def new_scene(self, viewer=None, name=None, **kwargs):
"""Create or manage a new VTK scene window. If no `viewer`
argument is provided, the method creates a new viewer using
`self.scene_factory`. If `self.scene_factory` is `None` then
it creates an `ivtk` viewer. This code requires that the
`viewer` has a `scene` attribute/trait that is a
`pyface.tvtk.scene.Scene`. It also works best if the viewer
supports `closing` and `activated` events.
The method returns the created viewer.
Parameters:
-----------
viewer - The viewer object, if None, one is created for you.
name - The name attribute of the viewer
``**kwargs`` - The extra keyword arguments are passed along to
the scene factory.
"""
if viewer is None:
factory_kwargs = {}
factory_kwargs_names = get_args(self.scene_factory)
for arg, value in kwargs.items():
if arg in factory_kwargs_names:
factory_kwargs[arg] = value
viewer = self.scene_factory(**factory_kwargs)
process_ui_events()
if name is not None:
viewer.name = name
# Hang on to a reference to this viewer; if we don't, Qt4 can crash.
# This is because the viewer will be closed and gc'd if there isn't a
# reference to it. When the viewer is gc'd the scene is also closed,
# leaving the engine with a dead scene and causing a crash.
self._viewer_ref[viewer.scene] = viewer
self.add_scene(viewer.scene)
if hasattr(viewer, 'on_trait_change'):
viewer.on_trait_change(self._on_scene_closed, 'closing')
viewer.on_trait_change(self._on_scene_activated, 'activated')
if hasattr(viewer, 'title'):
self.current_scene.sync_trait('name', viewer, 'title')
return viewer
@recordable
def close_scene(self, scene):
"""Given a scene created from new_scene, this method closes it
and removes the scene from the list of scenes we manage.
Parameters:
-----------
scene - `pyface.tvtk.scene.Scene` or an object that holds a
reference to a `pyface.tvtk.scene.Scene` in a `scene`
attribute.
"""
viewer = self.get_viewer(scene)
self.remove_scene(scene.scene)
if hasattr(scene, 'close'):
scene.close()
elif scene.scene is not None:
scene.scene.close()
if viewer is not None and hasattr(viewer, 'close'):
viewer.close()
def get_viewer(self, scene):
"""Return the viewer associated with a given scene.
Parameters:
-----------
scene - A `mayavi.core.scene.Scene` instance.
"""
return self._viewer_ref.get(scene.scene)
def dialog_view(self):
""" Default dialog view for Engine objects.
"""
return None
######################################################################
# Non-public interface
######################################################################
def _on_select(self, object):
"""Called by the EngineTree when an object on the view is
selected. This basically sets the current object and current
scene."""
self.current_selection = object
self._current_object = object
try:
scene = object.scene
for s in self.scenes:
if s.scene == scene:
self._current_scene = s
break
except AttributeError:
pass
def _get_current_scene(self):
n_scene = len(self.scenes)
if n_scene == 0:
return None
elif n_scene == 1:
return self.scenes[0]
elif self._current_scene is not None:
return self._current_scene
elif n_scene > 1:
return self.scenes[-1]
else:
return None
def _set_current_scene(self, scene):
old = self._current_scene
self._current_scene = scene
self.trait_property_changed('current_scene', old, scene)
def _get_current_object(self):
if self._current_object is not None:
return self._current_object
elif self.current_scene is not None:
return self.current_scene
else:
return None
def _set_current_object(self, object):
old = self._current_object
self._current_object = object
self.trait_property_changed('current_object', old, object)
def _get_current_selection(self):
return self._current_selection
def _set_current_selection(self, object):
old = self._current_selection
if not isinstance(object, (Base, AdderNode)):
object = None
self._current_selection = object
self.trait_property_changed('current_selection', old, object)
def _on_scene_closed(self, obj, name, old, new):
self.remove_scene(obj.scene)
def _on_scene_activated(self, obj, name, old, new):
for scene in self.scenes:
if scene.scene is obj.scene:
self.current_scene = scene
break
def _closed_fired(self):
""" When the engine is closed, clear the viewer ref which otherwise
stores references to scenes to prevent crash on QT4.
See: self.new_scene and MlabSceneModel._closed_fired
"""
self._viewer_ref.clear()
self.scenes = []
preference_manager.root.on_trait_change(self._show_helper_nodes_changed,
'show_helper_nodes',
remove=True)
registry.unregister_engine(self)
def _show_helper_nodes_changed(self):
self.trait_property_changed('children_ui_list', [],
self.children_ui_list)
def _get_children_ui_list(self):
""" Trait getter for children_ui_list Property.
"""
if preference_manager.root.show_helper_nodes \
and len(self.scenes) == 0:
return [SceneAdderNode(object=self)]
else:
return self.scenes
@on_trait_change('scenes[]')
def _trigger_children_ui_list(self, old, new):
""" Trigger a children_ui_list change when scenes changed.
"""
self.trait_property_changed('children_ui_list', old, new)
def _recorder_changed(self, old, new):
if new is not None:
new.record('# Recorded script from Mayavi2')
new.record('from numpy import array')
new.record('try:')
new.record(' engine = mayavi.engine')
new.record('except NameError:')
new.record(' from mayavi.api import Engine')
new.record(' engine = Engine()')
new.record(' engine.start()')
new.record('if len(engine.scenes) == 0:')
new.record(' engine.new_scene()')
new.record('# ------------------------------------------- ')
elif old is not None:
old.record('# ------------------------------------------- ')
old.record('from mayavi.tools.show import show')
old.record('show()')
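# A minimal usage sketch (assumes a working VTK/TraitsUI install; the
# ParametricSurface source and Surface module are illustrative picks from
# mayavi's public api modules):
#
#     from mayavi.core.engine import Engine
#     from mayavi.sources.api import ParametricSurface
#     from mayavi.modules.api import Surface
#
#     engine = Engine()
#     engine.start()              # registers the engine; fires `started`
#     engine.new_scene()          # builds a viewer via `scene_factory`
#     engine.add_source(ParametricSurface())  # goes to the current scene
#     engine.add_module(Surface())            # attached to current_object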
|
dmsurti/mayavi
|
mayavi/core/engine.py
|
Python
|
bsd-3-clause
| 22,628
|
[
"Mayavi",
"VTK"
] |
797667e47f04063ac4b0181f7c7725f3c67a50d4abdec2f7844a2fd8af0db0c2
|
#!python
# -*- Python -*-
# Copyright 2014-2015 Brian Olson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import struct
import sys
_IS_PY3 = sys.version_info[0] >= 3
if _IS_PY3:
from io import BytesIO as StringIO
else:
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
CBOR_TYPE_MASK = 0xE0 # top 3 bits
CBOR_INFO_BITS = 0x1F # low 5 bits
CBOR_UINT = 0x00
CBOR_NEGINT = 0x20
CBOR_BYTES = 0x40
CBOR_TEXT = 0x60
CBOR_ARRAY = 0x80
CBOR_MAP = 0xA0
CBOR_TAG = 0xC0
CBOR_7 = 0xE0 # float and other types
CBOR_UINT8_FOLLOWS = 24 # 0x18
CBOR_UINT16_FOLLOWS = 25 # 0x19
CBOR_UINT32_FOLLOWS = 26 # 0x1a
CBOR_UINT64_FOLLOWS = 27 # 0x1b
CBOR_VAR_FOLLOWS = 31 # 0x1f
CBOR_BREAK = 0xFF
CBOR_FALSE = (CBOR_7 | 20)
CBOR_TRUE = (CBOR_7 | 21)
CBOR_NULL = (CBOR_7 | 22)
CBOR_UNDEFINED = (CBOR_7 | 23) # js 'undefined' value
CBOR_FLOAT16 = (CBOR_7 | 25)
CBOR_FLOAT32 = (CBOR_7 | 26)
CBOR_FLOAT64 = (CBOR_7 | 27)
CBOR_TAG_DATE_STRING = 0 # RFC3339
CBOR_TAG_DATE_ARRAY = 1 # any number type follows, seconds since 1970-01-01T00:00:00 UTC
CBOR_TAG_BIGNUM = 2 # big endian byte string follows
CBOR_TAG_NEGBIGNUM = 3 # big endian byte string follows
CBOR_TAG_DECIMAL = 4 # [ 10^x exponent, number ]
CBOR_TAG_BIGFLOAT = 5 # [ 2^x exponent, number ]
CBOR_TAG_BASE64URL = 21
CBOR_TAG_BASE64 = 22
CBOR_TAG_BASE16 = 23
CBOR_TAG_CBOR = 24 # following byte string is embedded CBOR data
CBOR_TAG_URI = 32
# NB: the next two assignments rebind names already used for tags 21 and
# 22 above; RFC 7049 assigns 21-23 to "expected later encoding" hints and
# 33-34 to text that is already base64url-/base64-encoded.
CBOR_TAG_BASE64URL = 33
CBOR_TAG_BASE64 = 34
CBOR_TAG_REGEX = 35
CBOR_TAG_MIME = 36 # following text is MIME message, headers, separators and all
CBOR_TAG_CBOR_FILEHEADER = 55799 # can open a file with 0xd9d9f7
_CBOR_TAG_BIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_BIGNUM)
def dumps_int(val):
"return bytes representing int val in CBOR"
if val >= 0:
# CBOR_UINT is 0, so I'm lazy/efficient about not OR-ing it in.
if val <= 23:
return struct.pack('B', val)
if val <= 0x0ff:
return struct.pack('BB', CBOR_UINT8_FOLLOWS, val)
if val <= 0x0ffff:
return struct.pack('!BH', CBOR_UINT16_FOLLOWS, val)
if val <= 0x0ffffffff:
return struct.pack('!BI', CBOR_UINT32_FOLLOWS, val)
if val <= 0x0ffffffffffffffff:
return struct.pack('!BQ', CBOR_UINT64_FOLLOWS, val)
outb = _dumps_bignum_to_bytearray(val)
return _CBOR_TAG_BIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb
val = -1 - val
return _encode_type_num(CBOR_NEGINT, val)
if _IS_PY3:
def _dumps_bignum_to_bytearray(val):
out = []
while val > 0:
out.insert(0, val & 0x0ff)
val = val >> 8
return bytes(out)
else:
def _dumps_bignum_to_bytearray(val):
out = []
while val > 0:
out.insert(0, chr(val & 0x0ff))
val = val >> 8
return b''.join(out)
def dumps_float(val):
return struct.pack("!Bd", CBOR_FLOAT64, val)
_CBOR_TAG_NEGBIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_NEGBIGNUM)
def _encode_type_num(cbor_type, val):
"""For some CBOR primary type [0..7] and an auxiliary unsigned number, return CBOR encoded bytes"""
assert val >= 0
if val <= 23:
return struct.pack('B', cbor_type | val)
if val <= 0x0ff:
return struct.pack('BB', cbor_type | CBOR_UINT8_FOLLOWS, val)
if val <= 0x0ffff:
return struct.pack('!BH', cbor_type | CBOR_UINT16_FOLLOWS, val)
if val <= 0x0ffffffff:
return struct.pack('!BI', cbor_type | CBOR_UINT32_FOLLOWS, val)
if (((cbor_type == CBOR_NEGINT) and (val <= 0x07fffffffffffffff)) or
((cbor_type != CBOR_NEGINT) and (val <= 0x0ffffffffffffffff))):
return struct.pack('!BQ', cbor_type | CBOR_UINT64_FOLLOWS, val)
if cbor_type != CBOR_NEGINT:
raise Exception("value too big for CBOR unsigned number: {0!r}".format(val))
outb = _dumps_bignum_to_bytearray(val)
return _CBOR_TAG_NEGBIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb
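# Illustrative sanity checks (values follow RFC 7049 Appendix A; these
# asserts are cheap and run at import time):
assert _encode_type_num(CBOR_UINT, 10) == b'\x0a'           # immediate value
assert _encode_type_num(CBOR_UINT, 500) == b'\x19\x01\xf4'  # uint16 follows
assert _encode_type_num(CBOR_TEXT, 24) == b'\x78\x18'       # uint8 follows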
if _IS_PY3:
def _is_unicode(val):
return isinstance(val, str)
else:
def _is_unicode(val):
return isinstance(val, unicode)
def dumps_string(val, is_text=None, is_bytes=None):
if _is_unicode(val):
val = val.encode('utf8')
is_text = True
is_bytes = False
if (is_bytes) or not (is_text == True):
return _encode_type_num(CBOR_BYTES, len(val)) + val
return _encode_type_num(CBOR_TEXT, len(val)) + val
def dumps_array(arr, sort_keys=False):
head = _encode_type_num(CBOR_ARRAY, len(arr))
parts = [dumps(x, sort_keys=sort_keys) for x in arr]
return head + b''.join(parts)
if _IS_PY3:
def dumps_dict(d, sort_keys=False):
head = _encode_type_num(CBOR_MAP, len(d))
parts = [head]
if sort_keys:
for k in sorted(d.keys()):
v = d[k]
parts.append(dumps(k, sort_keys=sort_keys))
parts.append(dumps(v, sort_keys=sort_keys))
else:
for k,v in d.items():
parts.append(dumps(k, sort_keys=sort_keys))
parts.append(dumps(v, sort_keys=sort_keys))
return b''.join(parts)
else:
def dumps_dict(d, sort_keys=False):
head = _encode_type_num(CBOR_MAP, len(d))
parts = [head]
if sort_keys:
for k in sorted(d.iterkeys()):
v = d[k]
parts.append(dumps(k, sort_keys=sort_keys))
parts.append(dumps(v, sort_keys=sort_keys))
else:
for k,v in d.iteritems():
parts.append(dumps(k, sort_keys=sort_keys))
parts.append(dumps(v, sort_keys=sort_keys))
return b''.join(parts)
def dumps_bool(b):
if b:
return struct.pack('B', CBOR_TRUE)
return struct.pack('B', CBOR_FALSE)
def dumps_tag(t, sort_keys=False):
return _encode_type_num(CBOR_TAG, t.tag) + dumps(t.value, sort_keys=sort_keys)
if _IS_PY3:
def _is_stringish(x):
return isinstance(x, (str, bytes))
def _is_intish(x):
return isinstance(x, int)
else:
def _is_stringish(x):
return isinstance(x, (str, basestring, bytes, unicode))
def _is_intish(x):
return isinstance(x, (int, long))
def dumps(ob, sort_keys=False):
if ob is None:
return struct.pack('B', CBOR_NULL)
if isinstance(ob, bool):
return dumps_bool(ob)
if _is_stringish(ob):
return dumps_string(ob)
if isinstance(ob, (list, tuple)):
return dumps_array(ob, sort_keys=sort_keys)
# TODO: accept other enumerables and emit a variable length array
if isinstance(ob, dict):
return dumps_dict(ob, sort_keys=sort_keys)
if isinstance(ob, float):
return dumps_float(ob)
if _is_intish(ob):
return dumps_int(ob)
if isinstance(ob, Tag):
return dumps_tag(ob, sort_keys=sort_keys)
raise Exception("don't know how to cbor serialize object of type %s", type(ob))
# same basic signature as json.dump, but with no options (yet)
def dump(obj, fp, sort_keys=False):
"""
obj: Python object to serialize
fp: file-like object capable of .write(bytes)
"""
# this is kinda lame, but probably not inefficient for non-huge objects
# TODO: .write() to fp as we go as each inner object is serialized
blob = dumps(obj, sort_keys=sort_keys)
fp.write(blob)
class Tag(object):
def __init__(self, tag=None, value=None):
self.tag = tag
self.value = value
def __repr__(self):
return "Tag({0!r}, {1!r})".format(self.tag, self.value)
def __eq__(self, other):
if not isinstance(other, Tag):
return False
return (self.tag == other.tag) and (self.value == other.value)
def loads(data):
"""
Parse CBOR bytes and return Python objects.
"""
if data is None:
raise ValueError("got None for buffer to decode in loads")
fp = StringIO(data)
return _loads(fp)[0]
def load(fp):
"""
Parse and return object from fp, a file-like object supporting .read(n)
"""
return _loads(fp)[0]
_MAX_DEPTH = 100
def _tag_aux(fp, tb):
bytes_read = 1
tag = tb & CBOR_TYPE_MASK
tag_aux = tb & CBOR_INFO_BITS
if tag_aux <= 23:
aux = tag_aux
elif tag_aux == CBOR_UINT8_FOLLOWS:
data = fp.read(1)
aux = struct.unpack_from("!B", data, 0)[0]
bytes_read += 1
elif tag_aux == CBOR_UINT16_FOLLOWS:
data = fp.read(2)
aux = struct.unpack_from("!H", data, 0)[0]
bytes_read += 2
elif tag_aux == CBOR_UINT32_FOLLOWS:
data = fp.read(4)
aux = struct.unpack_from("!I", data, 0)[0]
bytes_read += 4
elif tag_aux == CBOR_UINT64_FOLLOWS:
data = fp.read(8)
aux = struct.unpack_from("!Q", data, 0)[0]
bytes_read += 8
else:
assert tag_aux == CBOR_VAR_FOLLOWS, "bogus tag {0:02x}".format(tb)
aux = None
return tag, tag_aux, aux, bytes_read
def _read_byte(fp):
tb = fp.read(1)
if len(tb) == 0:
# I guess not all file-like objects do this
raise EOFError()
return ord(tb)
def _loads_var_array(fp, limit, depth, returntags, bytes_read):
ob = []
tb = _read_byte(fp)
while tb != CBOR_BREAK:
(subob, sub_len) = _loads_tb(fp, tb, limit, depth, returntags)
bytes_read += 1 + sub_len
ob.append(subob)
tb = _read_byte(fp)
return (ob, bytes_read + 1)
def _loads_var_map(fp, limit, depth, returntags, bytes_read):
ob = {}
tb = _read_byte(fp)
while tb != CBOR_BREAK:
(subk, sub_len) = _loads_tb(fp, tb, limit, depth, returntags)
bytes_read += 1 + sub_len
(subv, sub_len) = _loads(fp, limit, depth, returntags)
bytes_read += sub_len
ob[subk] = subv
tb = _read_byte(fp)
return (ob, bytes_read + 1)
if _IS_PY3:
def _loads_array(fp, limit, depth, returntags, aux, bytes_read):
ob = []
for i in range(aux):
subob, subpos = _loads(fp)
bytes_read += subpos
ob.append(subob)
return ob, bytes_read
def _loads_map(fp, limit, depth, returntags, aux, bytes_read):
ob = {}
for i in range(aux):
subk, subpos = _loads(fp)
bytes_read += subpos
subv, subpos = _loads(fp)
bytes_read += subpos
ob[subk] = subv
return ob, bytes_read
else:
def _loads_array(fp, limit, depth, returntags, aux, bytes_read):
ob = []
for i in xrange(aux):
subob, subpos = _loads(fp)
bytes_read += subpos
ob.append(subob)
return ob, bytes_read
def _loads_map(fp, limit, depth, returntags, aux, bytes_read):
ob = {}
for i in xrange(aux):
subk, subpos = _loads(fp)
bytes_read += subpos
subv, subpos = _loads(fp)
bytes_read += subpos
ob[subk] = subv
return ob, bytes_read
def _loads(fp, limit=None, depth=0, returntags=False):
"return (object, bytes read)"
if depth > _MAX_DEPTH:
raise Exception("hit CBOR loads recursion depth limit")
tb = _read_byte(fp)
return _loads_tb(fp, tb, limit, depth, returntags)
def _loads_tb(fp, tb, limit=None, depth=0, returntags=False):
# Some special cases of CBOR_7 are best handled by special struct.unpack logic here
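# (Half-precision floats have no struct format code in the Python versions
# this module targets, so the 1-bit sign / 5-bit exponent / 10-bit mantissa
# layout is decoded by hand in the CBOR_FLOAT16 branch below.)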
if tb == CBOR_FLOAT16:
data = fp.read(2)
hibyte, lowbyte = struct.unpack_from("BB", data, 0)
exp = (hibyte >> 2) & 0x1F
mant = ((hibyte & 0x03) << 8) | lowbyte
if exp == 0:
val = mant * (2.0 ** -24)
elif exp == 31:
if mant == 0:
val = float('Inf')
else:
val = float('NaN')
else:
val = (mant + 1024.0) * (2 ** (exp - 25))
if hibyte & 0x80:
val = -1.0 * val
return (val, 3)
elif tb == CBOR_FLOAT32:
data = fp.read(4)
pf = struct.unpack_from("!f", data, 0)
return (pf[0], 5)
elif tb == CBOR_FLOAT64:
data = fp.read(8)
pf = struct.unpack_from("!d", data, 0)
return (pf[0], 9)
tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb)
if tag == CBOR_UINT:
return (aux, bytes_read)
elif tag == CBOR_NEGINT:
return (-1 - aux, bytes_read)
elif tag == CBOR_BYTES:
ob, subpos = loads_bytes(fp, aux)
return (ob, bytes_read + subpos)
elif tag == CBOR_TEXT:
raw, subpos = loads_bytes(fp, aux, btag=CBOR_TEXT)
ob = raw.decode('utf8')
return (ob, bytes_read + subpos)
elif tag == CBOR_ARRAY:
if aux is None:
return _loads_var_array(fp, limit, depth, returntags, bytes_read)
return _loads_array(fp, limit, depth, returntags, aux, bytes_read)
elif tag == CBOR_MAP:
if aux is None:
return _loads_var_map(fp, limit, depth, returntags, bytes_read)
return _loads_map(fp, limit, depth, returntags, aux, bytes_read)
elif tag == CBOR_TAG:
ob, subpos = _loads(fp)
bytes_read += subpos
if returntags:
# Don't interpret the tag, return it and the tagged object.
ob = Tag(aux, ob)
else:
# attempt to interpret the tag and the value into a Python object.
ob = tagify(ob, aux)
return ob, bytes_read
elif tag == CBOR_7:
if tb == CBOR_TRUE:
return (True, bytes_read)
if tb == CBOR_FALSE:
return (False, bytes_read)
if tb == CBOR_NULL:
return (None, bytes_read)
if tb == CBOR_UNDEFINED:
return (None, bytes_read)
raise ValueError("unknown cbor tag 7 byte: {:02x}".format(tb))
def loads_bytes(fp, aux, btag=CBOR_BYTES):
# TODO: limit to some maximum number of chunks and some maximum total bytes
if aux is not None:
# simple case
ob = fp.read(aux)
return (ob, aux)
# read chunks of bytes
chunklist = []
total_bytes_read = 0
while True:
tb = fp.read(1)[0]
if not _IS_PY3:
tb = ord(tb)
if tb == CBOR_BREAK:
total_bytes_read += 1
break
tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb)
assert tag == btag, 'variable length value contains unexpected component'
ob = fp.read(aux)
chunklist.append(ob)
total_bytes_read += bytes_read + aux
return (b''.join(chunklist), total_bytes_read)
if _IS_PY3:
def _bytes_to_biguint(bs):
out = 0
for ch in bs:
out = out << 8
out = out | ch
return out
else:
def _bytes_to_biguint(bs):
out = 0
for ch in bs:
out = out << 8
out = out | ord(ch)
return out
def tagify(ob, aux):
# TODO: make this extensible?
# cbor.register_tag_handler(tagnumber, tag_handler)
# where tag_handler takes (tagnumber, tagged_object)
if aux == CBOR_TAG_DATE_STRING:
# TODO: parse RFC3339 date string
pass
if aux == CBOR_TAG_DATE_ARRAY:
return datetime.datetime.utcfromtimestamp(ob)
if aux == CBOR_TAG_BIGNUM:
return _bytes_to_biguint(ob)
if aux == CBOR_TAG_NEGBIGNUM:
return -1 - _bytes_to_biguint(ob)
if aux == CBOR_TAG_REGEX:
# Is this actually a good idea? Should we just return the tag and the raw value to the user somehow?
return re.compile(ob)
return Tag(aux, ob)
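if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative; mirrors the json-style
    # dumps/loads calling convention used above):
    doc = {'a': 1, 'b': [1.5, True, None], 'c': 'text'}
    assert loads(dumps(doc)) == doc
    # Tags without a special decoder survive a round trip as Tag objects.
    assert loads(dumps(Tag(42, 'payload'))) == Tag(42, 'payload')
    print('cbor round-trip ok')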
|
hcrlab/access_teleop
|
rosbridge_suite/rosbridge_library/src/rosbridge_library/util/cbor.py
|
Python
|
mit
| 16,169
|
[
"Brian"
] |
78595d2ec21684826711c27e9715176864ef11a17b367b98fdb4849c322bda86
|
from __future__ import print_function
from alge import Case, of, datatype
from timeit import repeat
Op = datatype("Op", ['a', 'b'])
class Do(Case):
@of("Op(a, b)")
def op(self, a, b):
return a + b
class Visitor(object):
def visit(self, val):
fn = getattr(self, "visit_%s" % type(val).__name__)
return fn(val)
def visit_Op(self, val):
return val.a + val.b
args = 1, 2
op = Op(*args)
t_alge = min(repeat(lambda: Do(op), repeat=3, number=1000))
t_vis = min(repeat(lambda: Visitor().visit(op), repeat=3, number=1000))
print('t_alge', t_alge)
print('t_vis ', t_vis)
|
ContinuumIO/pyalge
|
speed.py
|
Python
|
bsd-2-clause
| 580
|
[
"VisIt"
] |
89a9a156a1c814d708c6a0a1a37b57b77a4da2eba2119052b46f86223c072ed6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Apr 25, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 25, 2012"
import random
from pymatgen.core.lattice import Lattice
from pymatgen.util.coord_utils import *
from pymatgen.util.testing import PymatgenTest
class CoordUtilsTest(PymatgenTest):
def test_get_linear_interpolated_value(self):
xvals = [0, 1, 2, 3, 4, 5]
yvals = [3, 6, 7, 8, 10, 12]
self.assertEqual(get_linear_interpolated_value(xvals, yvals, 3.6), 9.2)
self.assertRaises(ValueError, get_linear_interpolated_value, xvals,
yvals, 6)
def test_in_coord_list(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(in_coord_list(coords, test_coord))
self.assertTrue(in_coord_list(coords, test_coord, atol=0.15))
self.assertFalse(in_coord_list([0.99, 0.99, 0.99], test_coord,
atol=0.15))
def test_is_coord_subset(self):
c1 = [0,0,0]
c2 = [0,1.2,-1]
c3 = [3,2,1]
c4 = [3-9e-9, 2-9e-9, 1-9e-9]
self.assertTrue(is_coord_subset([c1, c2, c3], [c1, c4, c2]))
self.assertTrue(is_coord_subset([c1], [c2, c1]))
self.assertTrue(is_coord_subset([c1, c2], [c2, c1]))
self.assertFalse(is_coord_subset([c1, c2], [c2, c3]))
self.assertFalse(is_coord_subset([c1, c2], [c2]))
def test_coord_list_mapping(self):
c1 = [0,.124,0]
c2 = [0,1.2,-1]
c3 = [3,2,1]
a = np.array([c1, c2])
b = np.array([c3, c2, c1])
inds = coord_list_mapping(a, b)
self.assertTrue(np.allclose(a, b[inds]))
self.assertRaises(Exception, coord_list_mapping, [c1,c2], [c2,c3])
self.assertRaises(Exception, coord_list_mapping, [c2], [c2,c2])
def test_coord_list_mapping_pbc(self):
c1 = [0.1, 0.2, 0.3]
c2 = [0.2, 0.3, 0.3]
c3 = [0.5, 0.3, 0.6]
c4 = [1.5, -0.7, -1.4]
a = np.array([c1, c3, c2])
b = np.array([c4, c2, c1])
inds = coord_list_mapping_pbc(a, b)
diff = a - b[inds]
diff -= np.round(diff)
self.assertTrue(np.allclose(diff, 0))
self.assertRaises(Exception, coord_list_mapping_pbc, [c1,c2], [c2,c3])
self.assertRaises(Exception, coord_list_mapping_pbc, [c2], [c2,c2])
def test_find_in_coord_list(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(find_in_coord_list(coords, test_coord))
self.assertEqual(find_in_coord_list(coords, test_coord, atol=0.15)[0],
0)
self.assertFalse(find_in_coord_list([0.99, 0.99, 0.99], test_coord,
atol=0.15))
coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.1, 0.1, 0.1]]
self.assertArrayEqual(find_in_coord_list(coords, test_coord,
atol=0.15), [0, 2])
def test_all_distances(self):
coords1 = [[0, 0, 0], [0.5, 0.5, 0.5]]
coords2 = [[1, 2, -1], [1, 0, 0], [1, 0, 0]]
result = [[2.44948974, 1, 1], [2.17944947, 0.8660254, 0.8660254]]
self.assertArrayAlmostEqual(all_distances(coords1, coords2), result, 4)
def test_pbc_diff(self):
self.assertArrayAlmostEqual(pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]),
[-0.2, -0.4, 0.2])
self.assertArrayAlmostEqual(pbc_diff([0.9, 0.1, 1.01],
[0.3, 0.5, 0.9]),
[-0.4, -0.4, 0.11])
self.assertArrayAlmostEqual(pbc_diff([0.1, 0.6, 1.01],
[0.6, 0.1, 0.9]),
[-0.5, 0.5, 0.11])
self.assertArrayAlmostEqual(pbc_diff([100.1, 0.2, 0.3],
[0123123.4, 0.5, 502312.6]),
[-0.3, -0.3, -0.3])
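# (pbc_diff returns the shortest periodic image of a - b: each fractional
# component is wrapped by subtracting np.round of the raw difference, so
# results lie in [-0.5, 0.5].)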
def test_in_coord_list_pbc(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(in_coord_list_pbc(coords, test_coord))
self.assertTrue(in_coord_list_pbc(coords, test_coord, atol=0.15))
test_coord = [0.99, 0.99, 0.99]
self.assertFalse(in_coord_list_pbc(coords, test_coord, atol=0.01))
def test_find_in_coord_list_pbc(self):
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
test_coord = [0.1, 0.1, 0.1]
self.assertFalse(find_in_coord_list_pbc(coords, test_coord))
self.assertEqual(find_in_coord_list_pbc(coords, test_coord,
atol=0.15)[0], 0)
test_coord = [0.99, 0.99, 0.99]
self.assertEqual(
find_in_coord_list_pbc(coords, test_coord, atol=0.02)[0], 0)
test_coord = [-0.499, -0.499, -0.499]
self.assertEqual(
find_in_coord_list_pbc(coords, test_coord, atol=0.01)[0], 1)
def test_is_coord_subset_pbc(self):
c1 = [0, 0, 0]
c2 = [0, 1.2, -1]
c3 = [2.3, 0, 1]
c4 = [1.3-9e-9, -1-9e-9, 1-9e-9]
self.assertTrue(is_coord_subset_pbc([c1, c2, c3], [c1, c4, c2]))
self.assertTrue(is_coord_subset_pbc([c1], [c2, c1]))
self.assertTrue(is_coord_subset_pbc([c1, c2], [c2, c1]))
self.assertFalse(is_coord_subset_pbc([c1, c2], [c2, c3]))
self.assertFalse(is_coord_subset_pbc([c1, c2], [c2]))
# test tolerances
c5 = [0.1, 0.1, 0.2]
atol1 = [0.25, 0.15, 0.15]
atol2 = [0.15, 0.15, 0.25]
self.assertFalse(is_coord_subset_pbc([c1], [c5], atol1))
self.assertTrue(is_coord_subset_pbc([c1], [c5], atol2))
# test mask
mask1 = [[True]]
self.assertFalse(is_coord_subset_pbc([c1], [c5], atol2, mask1))
mask2 = [[True, False]]
self.assertTrue(is_coord_subset_pbc([c1], [c2, c1], mask=mask2))
self.assertFalse(is_coord_subset_pbc([c1], [c1, c2], mask=mask2))
mask3 = [[False, True]]
self.assertFalse(is_coord_subset_pbc([c1], [c2, c1], mask=mask3))
self.assertTrue(is_coord_subset_pbc([c1], [c1, c2], mask=mask3))
def test_lattice_points_in_supercell(self):
supercell = np.array([[1, 3, 5], [-3, 2, 3], [-5, 3, 1]])
points = lattice_points_in_supercell(supercell)
self.assertAlmostEqual(len(points), abs(np.linalg.det(supercell)))
self.assertGreaterEqual(np.min(points), -1e-10)
self.assertLessEqual(np.max(points), 1-1e-10)
supercell = np.array([[-5, -5, -3], [0, -4, -2], [0, -5, -2]])
points = lattice_points_in_supercell(supercell)
self.assertAlmostEqual(len(points), abs(np.linalg.det(supercell)))
self.assertGreaterEqual(np.min(points), -1e-10)
self.assertLessEqual(np.max(points), 1-1e-10)
def test_barycentric(self):
#2d test
simplex1 = np.array([[0.3, 0.1], [0.2, -1.2], [1.3, 2.3]])
pts1 = np.array([[0.6, 0.1], [1.3, 2.3], [0.5, 0.5], [.7, 1]])
output1 = barycentric_coords(pts1, simplex1)
#do back conversion to cartesian
o_dot_s = np.sum(output1[:, :, None] * simplex1[None, :, :], axis=1)
self.assertTrue(np.allclose(pts1, o_dot_s))
#do 3d tests
simplex2 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 0]])
pts2 = np.array([[0, 0, 1], [0, 0.5, 0.5], [1./3, 1./3, 1./3]])
output2 = barycentric_coords(pts2, simplex2)
self.assertTrue(np.allclose(output2[1], [0.5, 0.5, 0, 0]))
#do back conversion to cartesian
o_dot_s = np.sum(output2[:, :, None] * simplex2[None, :, :], axis=1)
self.assertTrue(np.allclose(pts2, o_dot_s))
#test single point
self.assertTrue(np.allclose(output2[2],
barycentric_coords(pts2[2], simplex2)))
def test_pbc_shortest_vectors(self):
fcoords = np.array([[0.3, 0.3, 0.5],
[0.1, 0.1, 0.3],
[0.9, 0.9, 0.8],
[0.1, 0.0, 0.5],
[0.9, 0.7, 0.0]])
lattice = Lattice.from_lengths_and_angles([8, 8, 4],
[90, 76, 58])
expected = np.array([[0.000, 3.015, 4.072, 3.519, 3.245],
[3.015, 0.000, 3.207, 1.131, 4.453],
[4.072, 3.207, 0.000, 2.251, 1.788],
[3.519, 1.131, 2.251, 0.000, 3.852]])
vectors = pbc_shortest_vectors(lattice, fcoords[:-1], fcoords)
dists = np.sum(vectors**2, axis = -1)**0.5
self.assertArrayAlmostEqual(dists, expected, 3)
#now try with small loop threshold
from pymatgen.util import coord_utils
prev_threshold = coord_utils.LOOP_THRESHOLD
coord_utils.LOOP_THRESHOLD = 0
vectors = pbc_shortest_vectors(lattice, fcoords[:-1], fcoords)
dists = np.sum(vectors**2, axis = -1)**0.5
self.assertArrayAlmostEqual(dists, expected, 3)
coord_utils.LOOP_THRESHOLD = prev_threshold
def test_get_angle(self):
v1 = (1, 0, 0)
v2 = (1, 1, 1)
self.assertAlmostEqual(get_angle(v1, v2), 54.7356103172)
self.assertAlmostEqual(get_angle(v1, v2, units="radians"),
0.9553166181245092)
class SimplexTest(PymatgenTest):
def setUp(self):
coords = []
coords.append([0, 0, 0])
coords.append([0, 1, 0])
coords.append([0, 0, 1])
coords.append([1, 0, 0])
self.simplex = Simplex(coords)
def test_equal(self):
c2 = list(self.simplex.coords)
random.shuffle(c2)
self.assertEqual(Simplex(c2), self.simplex)
def test_in_simplex(self):
self.assertTrue(self.simplex.in_simplex([0.1, 0.1, 0.1]))
self.assertFalse(self.simplex.in_simplex([0.6, 0.6, 0.6]))
for i in range(10):
coord = np.random.random_sample(size=3) / 3
self.assertTrue(self.simplex.in_simplex(coord))
def test_2dtriangle(self):
s = Simplex([[0, 1], [1, 1], [1, 0]])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 0.5]),
[0.5, 0, 0.5])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 1]), [0.5, 0.5, 0])
self.assertArrayAlmostEqual(s.bary_coords([0.5, 0.75]), [0.5, 0.25, 0.25])
self.assertArrayAlmostEqual(s.bary_coords([0.75, 0.75]), [0.25, 0.5, 0.25])
s = Simplex([[1, 1], [1, 0]])
self.assertRaises(ValueError, s.bary_coords, [0.5, 0.5])
def test_volume(self):
# Should be the volume of a right tetrahedron.
self.assertAlmostEqual(self.simplex.volume, 1/6)
def test_str(self):
self.assertTrue(str(self.simplex).startswith("3-simplex in 4D space"))
self.assertTrue(repr(self.simplex).startswith("3-simplex in 4D space"))
if __name__ == "__main__":
import unittest2 as unittest
unittest.main()
|
aykol/pymatgen
|
pymatgen/util/tests/test_coord_utils.py
|
Python
|
mit
| 11,338
|
[
"pymatgen"
] |
da87fa0c970b2fcad241b90859883be8af82a9e502f3daa771ea4352ff69bbcd
|
"""Standard test images.
For more images, see
- http://sipi.usc.edu/database/database.php
"""
import os as _os
from .. import data_dir
from ..io import imread, use_plugin
from ._binary_blobs import binary_blobs
__all__ = ['load',
'camera',
'lena',
'text',
'checkerboard',
'coins',
'moon',
'page',
'horse',
'clock',
'immunohistochemistry',
'chelsea',
'coffee',
'hubble_deep_field',
'rocket',
'astronaut']
def load(f):
"""Load an image file located in the data directory.
Parameters
----------
f : string
File name.
Returns
-------
img : ndarray
Image loaded from ``skimage.data_dir``.
"""
use_plugin('pil')
return imread(_os.path.join(data_dir, f))
def camera():
"""Gray-level "camera" image.
Often used for segmentation and denoising examples.
"""
return load("camera.png")
def lena():
"""Colour "Lena" image.
The standard, yet sometimes controversial Lena test image was
scanned from the November 1972 edition of Playboy magazine. From
an image processing perspective, this image is useful because it
contains smooth, textured, shaded as well as detail areas.
"""
return load("lena.png")
def astronaut():
"""Colour image of the astronaut Eileen Collins.
Photograph of Eileen Collins, an American astronaut. She was selected
as an astronaut in 1992 and first piloted the space shuttle STS-63 in
1995. She retired in 2006 after spending a total of 38 days, 8 hours
and 10 minutes in outer space.
This image was downloaded from the `NASA Great Images database
<http://grin.hq.nasa.gov/ABSTRACTS/GPN-2000-001177.html>`__.
No known copyright restrictions, released into the public domain.
"""
return load("astronaut.png")
def text():
"""Gray-level "text" image used for corner detection.
Notes
-----
This image was downloaded from `Wikipedia
<http://en.wikipedia.org/wiki/File:Corner.png>`__.
No known copyright restrictions, released into the public domain.
"""
return load("text.png")
def checkerboard():
"""Checkerboard image.
Checkerboards are often used in image calibration, since the
corner-points are easy to locate. Because of the many parallel
edges, they also visualise distortions particularly well.
"""
return load("chessboard_GRAY.png")
def coins():
"""Greek coins from Pompeii.
This image shows several coins outlined against a gray background.
It is especially useful in, e.g. segmentation tests, where
individual objects need to be identified against a background.
The background shares enough grey levels with the coins that a
simple segmentation is not sufficient.
Notes
-----
This image was downloaded from the
`Brooklyn Museum Collection
<http://www.brooklynmuseum.org/opencollection/archives/image/617/image>`__.
No known copyright restrictions.
"""
return load("coins.png")
def moon():
"""Surface of the moon.
This low-contrast image of the surface of the moon is useful for
illustrating histogram equalization and contrast stretching.
"""
return load("moon.png")
def page():
"""Scanned page.
This image of printed text is useful for demonstrations requiring uneven
background illumination.
"""
return load("page.png")
def horse():
"""Black and white silhouette of a horse.
This image was downloaded from
`openclipart <http://openclipart.org/detail/158377/horse-by-marauder>`__.
Drawn and uploaded by Andreas Preuss (marauder); released into the
public domain.
"""
return load("horse.png")
def clock():
"""Motion blurred clock.
This photograph of a wall clock was taken while moving the camera in an
approximately horizontal direction. It may be used to illustrate
inverse filters and deconvolution.
Released into the public domain by the photographer (Stefan van der Walt).
"""
return load("clock_motion.png")
def immunohistochemistry():
"""Immunohistochemical (IHC) staining with hematoxylin counterstaining.
This picture shows colonic glands where the IHC expression of FHL2 protein
is revealed with DAB. Hematoxylin counterstaining is applied to enhance the
negative parts of the tissue.
This image was acquired at the Center for Microscopy And Molecular Imaging
(CMMI).
No known copyright restrictions.
"""
return load("ihc.png")
def chelsea():
"""Chelsea the cat.
An example with texture, prominent edges in horizontal and diagonal
directions, as well as features of differing scales.
Notes
-----
No copyright restrictions. CC0 by the photographer (Stefan van der Walt).
"""
return load("chelsea.png")
def coffee():
"""Coffee cup.
This photograph is courtesy of Pikolo Espresso Bar.
It contains several elliptical shapes as well as varying texture (smooth
porcelain to coarse wood grain).
Notes
-----
No copyright restrictions. CC0 by the photographer (Rachel Michetti).
"""
return load("coffee.png")
def hubble_deep_field():
"""Hubble eXtreme Deep Field.
This photograph contains the Hubble Telescope's farthest ever view of
the universe. It can be useful as an example for multi-scale
detection.
Notes
-----
This image was downloaded from
`HubbleSite
<http://hubblesite.org/newscenter/archive/releases/2012/37/image/a/>`__.
The image was captured by NASA and `may be freely used in the
public domain <http://www.nasa.gov/audience/formedia/features/MP_Photo_Guidelines.html>`_.
"""
return load("hubble_deep_field.jpg")
def rocket():
"""Launch photo of DSCOVR on Falcon 9 by SpaceX.
This is the launch photo of Falcon 9 carrying DSCOVR lifted off from
SpaceX's Launch Complex 40 at Cape Canaveral Air Force Station, FL.
Notes
-----
This image was downloaded from
`SpaceX Photos
<https://www.flickr.com/photos/spacexphotos/16511594820/in/photostream/>`__.
The image was captured by SpaceX and `released in the public domain
<http://arstechnica.com/tech-policy/2015/03/elon-musk-puts-spacex-photos-into-the-public-domain/>`_.
"""
return load("rocket.jpg")
|
michaelpacer/scikit-image
|
skimage/data/__init__.py
|
Python
|
bsd-3-clause
| 6,482
|
[
"ESPResSo"
] |
a5e8e342eeece83e4554e0141f1f04eb159d177b72bf8fd9fe006668b1545a5d
|
'''
James D. Zoll
4/15/2013
Purpose: Defines template tags for the Leapday Recipedia application.
License: This is a public work.
'''
from django import template
register = template.Library()
@register.filter()
def css_name(value):
'''
Returns the lower-cased display name with spaces replaced by
hyphens, which is used as the CSS class for the good.
Keyword Arguments:
value -> Good. The good to get the CSS class for.
'''
return value.lower().replace(' ','-')
@register.filter()
def desc_value_sort(value):
'''
Designed to sort the results of .iteritems() on a dict of goods
for the index.
value -> List of tuples.
'''
return sorted(value, key=lambda x: x[1]['active']['value'], reverse=True)
@register.filter()
def base_good_display_name(value):
BASE_GOODS = {'good_water': 'Water',
'good_food': 'Food',
'good_wood': 'Wood',
'good_stone': 'Stone',
'goodtype_crystal': 'Crystal'}
return BASE_GOODS[value]
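# Illustrative template usage (a sketch; the surrounding markup is
# hypothetical):
#
#     {% load leapday_extras %}
#     <div class="good {{ good.display_name|css_name }}">
#         {{ 'good_water'|base_good_display_name }}  {# renders "Water" #}
#     </div>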
|
Zerack/zoll.me
|
leapday/templatetags/leapday_extras.py
|
Python
|
mit
| 1,051
|
[
"CRYSTAL"
] |
a4ddab892c84b7887189d8ff38299cbaf7aacf5a69f1c75698403c6fcf524081
|
# Original Author: Travis Oliphant 2002
# Bug-fixes in 2006 by Tim Leslie
from __future__ import division, print_function, absolute_import
import numpy
from numpy import asarray, tan, exp, ones, squeeze, sign, \
all, log, sqrt, pi, shape, array, minimum, where, random
from .optimize import Result, _check_unknown_options
from scipy.lib.six import xrange
__all__ = ['anneal']
_double_min = numpy.finfo(float).min
_double_max = numpy.finfo(float).max
class base_schedule(object):
def __init__(self):
self.dwell = 20
self.learn_rate = 0.5
self.lower = -10
self.upper = 10
self.Ninit = 50
self.accepted = 0
self.tests = 0
self.feval = 0
self.k = 0
self.T = None
def init(self, **options):
self.__dict__.update(options)
self.lower = asarray(self.lower)
self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
self.upper = asarray(self.upper)
self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
self.k = 0
self.accepted = 0
self.feval = 0
self.tests = 0
def getstart_temp(self, best_state):
""" Find a matching starting temperature and starting parameters vector
i.e. find x0 such that func(x0) = T0.
Parameters
----------
best_state : _state
A _state object to store the function value and x0 found.
Returns
-------
x0 : array
The starting parameters vector.
"""
assert self.dims is not None
lrange = self.lower
urange = self.upper
fmax = _double_min
fmin = _double_max
for _ in range(self.Ninit):
x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange
fval = self.func(x0, *self.args)
self.feval += 1
if fval > fmax:
fmax = fval
if fval < fmin:
fmin = fval
best_state.cost = fval
best_state.x = array(x0)
self.T0 = (fmax-fmin)*1.5
return best_state.x
def accept_test(self, dE):
T = self.T
self.tests += 1
if dE < 0:
self.accepted += 1
return 1
p = exp(-dE*1.0/self.boltzmann/T)
if (p > random.uniform(0.0, 1.0)):
self.accepted += 1
return 1
return 0
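# Metropolis criterion: downhill moves (dE < 0) are always accepted;
# uphill moves are accepted with probability exp(-dE / (boltzmann * T)).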
def update_guess(self, x0):
pass
def update_temp(self, x0):
pass
# A schedule due to Lester Ingber
class fast_sa(base_schedule):
def init(self, **options):
self.__dict__.update(options)
if self.m is None:
self.m = 1.0
if self.n is None:
self.n = 1.0
self.c = self.m * exp(-self.n * self.quench)
def update_guess(self, x0):
x0 = asarray(x0)
u = squeeze(random.uniform(0.0, 1.0, size=self.dims))
T = self.T
y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0)
xc = y*(self.upper - self.lower)
xnew = x0 + xc
return xnew
def update_temp(self):
self.T = self.T0*exp(-self.c * self.k**(self.quench))
self.k += 1
return
class cauchy_sa(base_schedule):
def update_guess(self, x0):
x0 = asarray(x0)
numbers = squeeze(random.uniform(-pi/2, pi/2, size=self.dims))
xc = self.learn_rate * self.T * tan(numbers)
xnew = x0 + xc
return xnew
def update_temp(self):
self.T = self.T0/(1+self.k)
self.k += 1
return
class boltzmann_sa(base_schedule):
def update_guess(self, x0):
std = minimum(sqrt(self.T) * ones(self.dims),
(self.upper - self.lower) / 3.0 / self.learn_rate)
x0 = asarray(x0)
xc = squeeze(random.normal(0, 1.0, size=self.dims))
xnew = x0 + xc*std*self.learn_rate
return xnew
def update_temp(self):
self.k += 1
self.T = self.T0 / log(self.k+1.0)
return
class _state(object):
def __init__(self):
self.x = None
self.cost = None
# TODO:
# allow for general annealing temperature profile
# in that case use update given by alpha and omega and
# variation of all previous updates and temperature?
# Simulated annealing
def anneal(func, x0, args=(), schedule='fast', full_output=0,
T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400,
boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0,
lower=-100, upper=100, dwell=50, disp=True):
"""
Minimize a function using simulated annealing.
Uses simulated annealing, a random algorithm that uses no derivative
information from the function being optimized. Other names for this
family of approaches include: "Monte Carlo", "Metropolis",
"Metropolis-Hastings", `etc`. They all involve (a) evaluating the
objective function on a random set of points, (b) keeping those that
pass their randomized evaluation criteria, (c) cooling (`i.e.`,
tightening) the evaluation criteria, and (d) repeating until their
termination criteria are met. In practice they have been used mainly in
discrete rather than in continuous optimization.
Available annealing schedules are 'fast', 'cauchy' and 'boltzmann'.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
`f(x, *args)`, where `x` is the argument in the form of a 1-D array
and `args` is a tuple of any additional fixed parameters needed to
completely specify the function.
x0 : 1-D array
An initial guess at the optimizing argument of `func`.
args : tuple, optional
Any additional fixed parameters needed to completely
specify the objective function.
schedule : str, optional
The annealing schedule to use. Must be one of 'fast', 'cauchy' or
'boltzmann'. See `Notes`.
full_output : bool, optional
If `full_output`, then return all values listed in the Returns
section. Otherwise, return just the `xmin` and `status` values.
T0 : float, optional
The initial "temperature". If None, then estimate it as 1.2 times
the largest cost-function deviation over random points in the
box-shaped region specified by the `lower, upper` input parameters.
Tf : float, optional
Final goal temperature. Cease iterations if the temperature
falls below `Tf`.
maxeval : int, optional
Cease iterations if the number of function evaluations exceeds
`maxeval`.
maxaccept : int, optional
Cease iterations if the number of points accepted exceeds `maxaccept`.
See `Notes` for the probabilistic acceptance criteria used.
maxiter : int, optional
Cease iterations if the number of cooling iterations exceeds `maxiter`.
learn_rate : float, optional
Scale constant for tuning the probabilistic acceptance criteria.
boltzmann : float, optional
Boltzmann constant in the probabilistic acceptance criteria
(increase for less stringent criteria at each temperature).
feps : float, optional
Cease iterations if the relative errors in the function value over the
last four coolings is below `feps`.
quench, m, n : floats, optional
Parameters to alter the `fast` simulated annealing schedule.
See `Notes`.
lower, upper : floats or 1-D arrays, optional
Lower and upper bounds on the argument `x`. If floats are provided,
they apply to all components of `x`.
dwell : int, optional
The number of times to execute the inner loop at each value of the
temperature. See `Notes`.
disp : bool, optional
Print a descriptive convergence message if True.
Returns
-------
xmin : ndarray
The point where the lowest function value was found.
Jmin : float
The objective function value at `xmin`.
T : float
The temperature at termination of the iterations.
feval : int
Number of function evaluations used.
iters : int
Number of cooling iterations used.
accept : int
Number of tests accepted.
status : int
A code indicating the reason for termination:
- 0 : Points no longer changing.
- 1 : Cooled to final temperature.
- 2 : Maximum function evaluations reached.
- 3 : Maximum cooling iterations reached.
- 4 : Maximum accepted query locations reached.
- 5 : Final point not the minimum amongst encountered points.
See Also
--------
basinhopping : another (more performant) global optimizer
brute : brute-force global optimizer
Notes
-----
Simulated annealing is a random algorithm which uses no derivative
information from the function being optimized. In practice it has
been more useful in discrete optimization than continuous
optimization, as there are usually better algorithms for continuous
optimization problems.
Some experimentation by trying the different temperature
schedules and altering their parameters is likely required to
obtain good performance.
The randomness in the algorithm comes from random sampling in numpy.
To obtain the same results you can call `numpy.random.seed` with the
same seed immediately before calling `anneal`.
We give a brief description of how the three temperature schedules
generate new points and vary their temperature. Temperatures are
only updated with iterations in the outer loop. The inner loop
loops over ``xrange(dwell)``, and new points are generated for
every iteration in the inner loop. Whether the proposed new points
are accepted is probabilistic.
For readability, let ``d`` denote the dimension of the inputs to func.
Also, let ``x_old`` denote the previous state, and ``k`` denote the
iteration number of the outer loop. All other variables not
defined below are input variables to `anneal` itself.
In the 'fast' schedule the updates are::
u ~ Uniform(0, 1, size = d)
y = sgn(u - 0.5) * T * ((1 + 1/T)**abs(2*u - 1) - 1.0)
xc = y * (upper - lower)
x_new = x_old + xc
c = m * exp(-n * quench)
T_new = T0 * exp(-c * k**quench)
In the 'cauchy' schedule the updates are::
u ~ Uniform(-pi/2, pi/2, size=d)
xc = learn_rate * T * tan(u)
x_new = x_old + xc
T_new = T0 / (1 + k)
In the 'boltzmann' schedule the updates are::
std = minimum(sqrt(T) * ones(d), (upper - lower) / (3*learn_rate))
y ~ Normal(0, std, size = d)
x_new = x_old + learn_rate * y
T_new = T0 / log(1 + k)
References
----------
[1] P. J. M. van Laarhoven and E. H. L. Aarts, "Simulated Annealing: Theory
and Applications", Kluwer Academic Publishers, 1987.
[2] W.H. Press et al., "Numerical Recipes: The Art of Scientific Computing",
Cambridge U. Press, 1987.
Examples
--------
*Example 1.* We illustrate the use of `anneal` to seek the global minimum
of a function of two variables that is equal to the sum of a positive-
definite quadratic and two deep "Gaussian-shaped" craters. Specifically,
define the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, ``params``, and the functions are
as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
>>> x0 = np.array([2., 2.]) # Initial guess.
>>> from scipy import optimize
>>> np.random.seed(555) # Seeded to allow replication.
>>> res = optimize.anneal(f, x0, args=params, schedule='boltzmann',
... full_output=True, maxiter=500, lower=-10,
... upper=10, dwell=250, disp=True)
Warning: Maximum number of iterations exceeded.
>>> res[0] # obtained minimum
array([-1.03914194, 1.81330654])
>>> res[1] # function value at minimum
-3.3817...
So this run settled on the point [-1.039, 1.813] with a minimum function
value of about -3.382. The final temperature was about 212. The run used
125301 function evaluations, 501 iterations (including the initial guess as
an iteration), and accepted 61162 points. The status flag of 3 also
indicates that `maxiter` was reached.
This problem's true global minimum lies near the point [-1.057, 1.808]
and has a value of about -3.409. So these `anneal` results are pretty
good and could be used as the starting guess in a local optimizer to
seek a more exact local minimum.
*Example 2.* To minimize the same objective function using
the `minimize` approach, we need to (a) convert the options to an
"options dictionary" using the keys prescribed for this method,
(b) call the `minimize` function with the name of the method (which
in this case is 'Anneal'), and (c) take account of the fact that
    the returned value will be a `Result` object (i.e., a dictionary,
as defined in `optimize.py`).
All of the allowable options for 'Anneal' when using the `minimize`
approach are listed in the ``myopts`` dictionary given below, although
in practice only the non-default values would be needed. Some of their
names differ from those used in the `anneal` approach. We can proceed
as follows:
    >>> myopts = {
    ...     'schedule'   : 'boltzmann',  # Non-default value.
    ...     'maxfev'     : None,         # Default, formerly `maxeval`.
    ...     'maxiter'    : 500,          # Non-default value.
    ...     'maxaccept'  : None,         # Default value.
    ...     'ftol'       : 1e-6,         # Default, formerly `feps`.
    ...     'T0'         : None,         # Default value.
    ...     'Tf'         : 1e-12,        # Default value.
    ...     'boltzmann'  : 1.0,          # Default value.
    ...     'learn_rate' : 0.5,          # Default value.
    ...     'quench'     : 1.0,          # Default value.
    ...     'm'          : 1.0,          # Default value.
    ...     'n'          : 1.0,          # Default value.
    ...     'lower'      : -10,          # Non-default value.
    ...     'upper'      : +10,          # Non-default value.
    ...     'dwell'      : 250,          # Non-default value.
    ...     'disp'       : True          # Default value.
    ...     }
>>> from scipy import optimize
>>> np.random.seed(777) # Seeded to allow replication.
    >>> res2 = optimize.minimize(f, x0, args=params, method='Anneal',
    ...                          options=myopts)
Warning: Maximum number of iterations exceeded.
>>> res2
status: 3
success: False
accept: 61742
nfev: 125301
T: 214.20624873839623
fun: -3.4084065576676053
x: array([-1.05757366, 1.8071427 ])
message: 'Maximum cooling iterations reached'
nit: 501
"""
opts = {'schedule': schedule,
'T0': T0,
'Tf': Tf,
'maxfev': maxeval,
'maxaccept': maxaccept,
'maxiter': maxiter,
'boltzmann': boltzmann,
'learn_rate': learn_rate,
'ftol': feps,
'quench': quench,
'm': m,
'n': n,
'lower': lower,
'upper': upper,
'dwell': dwell,
'disp': disp}
res = _minimize_anneal(func, x0, args, **opts)
if full_output:
return res['x'], res['fun'], res['T'], res['nfev'], res['nit'], \
res['accept'], res['status']
else:
return res['x'], res['status']
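# Illustrative sketch (not part of the original module): how the 'fast'
# schedule's temperature decays with the outer-loop iteration k, using the
# update rule quoted in the docstring above with the default quench = n = 1.
# Wrapped in a function so importing this file does not execute it.
def _example_fast_schedule_decay(T0=100.0, quench=1.0, n=1.0, iters=5):
    import math
    c = n * math.exp(-n * quench)
    return [T0 * math.exp(-c * k**quench) for k in range(iters)]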
def _minimize_anneal(func, x0, args=(),
schedule='fast', T0=None, Tf=1e-12, maxfev=None,
maxaccept=None, maxiter=400, boltzmann=1.0,
learn_rate=0.5, ftol=1e-6, quench=1.0, m=1.0, n=1.0,
lower=-100, upper=100, dwell=50, disp=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
simulated annealing algorithm.
Options for the simulated annealing algorithm are:
disp : bool
Set to True to print convergence messages.
schedule : str
Annealing schedule to use. One of: 'fast', 'cauchy' or
'boltzmann'.
T0 : float
Initial Temperature (estimated as 1.2 times the largest
cost-function deviation over random points in the range).
Tf : float
Final goal temperature.
maxfev : int
Maximum number of function evaluations to make.
maxaccept : int
Maximum changes to accept.
maxiter : int
Maximum number of iterations to perform.
boltzmann : float
Boltzmann constant in acceptance test (increase for less
stringent test at each temperature).
learn_rate : float
Scale constant for adjusting guesses.
ftol : float
Relative error in ``fun(x)`` acceptable for convergence.
quench, m, n : float
Parameters to alter fast_sa schedule.
lower, upper : float or ndarray
Lower and upper bounds on `x`.
dwell : int
The number of times to search the space at each temperature.
    This function is called by the `minimize` function with
    ``method='Anneal'``. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxeval = maxfev
feps = ftol
x0 = asarray(x0)
lower = asarray(lower)
upper = asarray(upper)
schedule = eval(schedule+'_sa()')
# initialize the schedule
schedule.init(dims=shape(x0), func=func, args=args, boltzmann=boltzmann,
T0=T0, learn_rate=learn_rate, lower=lower, upper=upper,
m=m, n=n, quench=quench, dwell=dwell)
current_state, last_state, best_state = _state(), _state(), _state()
if T0 is None:
x0 = schedule.getstart_temp(best_state)
else:
best_state.x = None
best_state.cost = numpy.Inf
last_state.x = asarray(x0).copy()
fval = func(x0, *args)
schedule.feval += 1
last_state.cost = fval
if last_state.cost < best_state.cost:
best_state.cost = fval
best_state.x = asarray(x0).copy()
schedule.T = schedule.T0
fqueue = [100, 300, 500, 700]
iters = 0
while 1:
for n in xrange(dwell):
current_state.x = schedule.update_guess(last_state.x)
current_state.cost = func(current_state.x, *args)
schedule.feval += 1
dE = current_state.cost - last_state.cost
if schedule.accept_test(dE):
last_state.x = current_state.x.copy()
last_state.cost = current_state.cost
if last_state.cost < best_state.cost:
best_state.x = last_state.x.copy()
best_state.cost = last_state.cost
schedule.update_temp()
iters += 1
# Stopping conditions
# 0) last saved values of f from each cooling step
# are all very similar (effectively cooled)
# 1) Tf is set and we are below it
# 2) maxeval is set and we are past it
# 3) maxiter is set and we are past it
# 4) maxaccept is set and we are past it
fqueue.append(squeeze(last_state.cost))
fqueue.pop(0)
af = asarray(fqueue)*1.0
if all(abs((af-af[0])/af[0]) < feps):
retval = 0
if abs(af[-1]-best_state.cost) > feps*10:
retval = 5
if disp:
print("Warning: Cooled to %f at %s but this is not"
% (squeeze(last_state.cost),
str(squeeze(last_state.x)))
+ " the smallest point found.")
break
if (Tf is not None) and (schedule.T < Tf):
retval = 1
break
if (maxeval is not None) and (schedule.feval > maxeval):
retval = 2
break
if (iters > maxiter):
if disp:
print("Warning: Maximum number of iterations exceeded.")
retval = 3
break
if (maxaccept is not None) and (schedule.accepted > maxaccept):
retval = 4
break
result = Result(x=best_state.x, fun=best_state.cost,
T=schedule.T, nfev=schedule.feval, nit=iters,
accept=schedule.accepted, status=retval,
success=(retval <= 1),
message={0: 'Points no longer changing',
1: 'Cooled to final temperature',
2: 'Maximum function evaluations',
3: 'Maximum cooling iterations reached',
4: 'Maximum accepted query locations reached',
5: 'Final point not the minimum amongst '
'encountered points'}[retval])
return result
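# Illustrative sketch (not part of the original module): the "effectively
# cooled" stopping test above declares convergence when the last few
# cooling-step costs agree with each other to within `ftol` relative error.
def _example_cooled_test(feps=1e-3):
    import numpy as np
    af = np.asarray([1.0001, 1.0000, 0.9999, 1.0000])
    return bool(np.all(np.abs((af - af[0]) / af[0]) < feps))  # True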
if __name__ == "__main__":
from numpy import cos
# minimum expected at ~-0.195
func = lambda x: cos(14.5 * x - 0.3) + (x + 0.2) * x
print(anneal(func, 1.0, full_output=1, upper=3.0, lower=-3.0,
feps=1e-4, maxiter=2000, schedule='cauchy'))
print(anneal(func, 1.0, full_output=1, upper=3.0, lower=-3.0,
feps=1e-4, maxiter=2000, schedule='fast'))
print(anneal(func, 1.0, full_output=1, upper=3.0, lower=-3.0,
feps=1e-4, maxiter=2000, schedule='boltzmann'))
# minimum expected at ~[-0.195, -0.1]
func = lambda x: (cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] +
(x[0] + 0.2) * x[0])
print(anneal(func, [1.0, 1.0], full_output=1,
upper=[3.0, 3.0], lower=[-3.0, -3.0],
feps=1e-4, maxiter=2000, schedule='cauchy'))
print(anneal(func, [1.0, 1.0], full_output=1,
upper=[3.0, 3.0], lower=[-3.0, -3.0],
feps=1e-4, maxiter=2000, schedule='fast'))
print(anneal(func, [1.0, 1.0], full_output=1,
upper=[3.0, 3.0], lower=[-3.0, -3.0],
feps=1e-4, maxiter=2000, schedule='boltzmann'))
|
juliantaylor/scipy
|
scipy/optimize/anneal.py
|
Python
|
bsd-3-clause
| 23,213
|
[
"Gaussian"
] |
663059e647966acf2e5ab541d9324fa1f5ae01cca3018c6adc4c25bf29a2d266
|
"""
Just-in-time compilation support.
"""
import abc
import copy
import os
import re
import ast
import logging
import inspect
import hashlib
import json
from collections import namedtuple
import tempfile
import ctree
from ctree.nodes import Project
from ctree.analyses import VerifyOnlyCtreeNodes
from ctree.frontend import get_ast, dump
from ctree.transforms import DeclarationFiller
from ctree.c.nodes import CFile, MultiNode
if ctree.OCL_ENABLED:
from ctree.ocl.nodes import OclFile
from ctree.nodes import File
log = logging.getLogger(__name__)
def getFile(filepath):
"""
Takes a filepath and returns a specialized File instance (i.e. OclFile,
CFile, etc)
"""
file_types = [CFile]
if ctree.OCL_ENABLED:
file_types.append(OclFile)
ext_map = {'.'+t._ext: t for t in file_types}
path, filename = os.path.split(filepath)
name, ext = os.path.splitext(filename)
filetype = ext_map[ext]
return filetype(name=name, path=path)
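# Illustrative sketch (not part of ctree): the extension dispatch getFile
# performs, shown with a stand-in file class. Each ctree File subclass is
# assumed to expose its bare extension as `_ext` (e.g. "c" for CFile).
def _example_ext_dispatch(filepath="kernel.c"):
    class FakeCFile(object):
        _ext = "c"
    ext_map = {'.' + t._ext: t for t in [FakeCFile]}
    name, ext = os.path.splitext(os.path.basename(filepath))
    return ext_map[ext], name  # (FakeCFile, "kernel")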
class JitModule(object):
"""
Manages compilation of multiple ASTs.
"""
def __init__(self):
if ctree.CONFIG.get('jit', 'COMPILE_PATH') and ctree.CONFIG.getboolean('jit', 'CACHE'):
ctree_dir = ctree.CONFIG.get('jit','COMPILE_PATH')
# write files to $TEMPDIR/ctree/run-XXXX
else:
ctree_dir = os.path.join(tempfile.gettempdir(), "ctree")
if not os.path.exists(ctree_dir):
os.mkdir(ctree_dir)
#self.compilation_dir = tempfile.mkdtemp(prefix="run-", dir=ctree_dir)
self.ll_module = None
self.exec_engine = None
def _link_in(self, submodule):
self.so_file_name = submodule
# if self.ll_module is not None:
# self.ll_module.link_in(submodule)
# else:
# self.ll_module = submodule
def get_callable(self, entry_point_name, entry_point_typesig):
"""
Returns a python callable that dispatches to the requested C function.
"""
        # get llvm representation of function
# ll_function = self.ll_module.get_function(entry_point_name)
import ctypes
lib = ctypes.cdll.LoadLibrary(self.so_file_name)
func_ptr = getattr(lib, entry_point_name)
func_ptr.argtypes = entry_point_typesig._argtypes_
func_ptr.restype = entry_point_typesig._restype_
# func = func_ptr
# run jit compiler
# from llvm.ee import EngineBuilder
# self.exec_engine = llvm.create_jit_compiler(self.ll_module)
# c_func_ptr = self.exec_engine.get_pointer_to_global(ll_function)
# cast c_func_ptr to python callable using ctypes
return func_ptr
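# Illustrative sketch (not part of ctree): the bare-ctypes pattern that
# get_callable relies on, demonstrated against libm's fabs on a POSIX
# system. The library and function are stand-ins; ctree instead loads the
# freshly compiled shared object and takes argtypes/restype from the
# CFUNCTYPE signature passed in as entry_point_typesig.
def _example_ctypes_dispatch():
    import ctypes
    import ctypes.util
    libm = ctypes.CDLL(ctypes.util.find_library("m"))
    libm.fabs.argtypes = [ctypes.c_double]
    libm.fabs.restype = ctypes.c_double
    return libm.fabs(-2.5)  # 2.5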
class ConcreteSpecializedFunction(object):
"""
A function backed by generated code.
"""
__metaclass__ = abc.ABCMeta
def _compile(self, entry_point_name, project_node, entry_point_typesig,
**kwargs):
"""
Returns a python callable.
"""
        assert isinstance(project_node, Project), \
            "Expected a Project, but got a %s." % type(project_node)
VerifyOnlyCtreeNodes().visit(project_node)
self._module = project_node.codegen(**kwargs)
# if log.getEffectiveLevel() == 'debug':
# highlighted = highlight(str(self._module.ll_module), 'llvm')
# log.debug("full LLVM program is: <<<\n%s\n>>>" % highlighted)
return self._module.get_callable(entry_point_name, entry_point_typesig)
@abc.abstractmethod
def __call__(self, *args, **kwargs):
pass
class LazySpecializedFunction(object):
"""
A callable object that will produce executable
code just-in-time.
"""
ProgramConfig = namedtuple('ProgramConfig',
['args_subconfig', 'tuner_subconfig'])
_directory_fields = ['__class__.__name__', 'backend_name']
class NameExtractor(ast.NodeVisitor):
"""
Extracts the first functiondef name found
"""
def visit_FunctionDef(self, node):
return node.name
def generic_visit(self, node):
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
res = self.visit(item)
if res:
return res
elif isinstance(value, ast.AST):
res = self.visit(value)
if res:
return res
def __init__(self, py_ast=None, sub_dir=None, backend_name="default"):
self._hash_cache = None
if py_ast is not None and \
self.apply is not LazySpecializedFunction.apply:
raise TypeError('Cannot define apply and pass py_ast')
self.original_tree = py_ast or \
(get_ast(self.apply)
if self.apply is not LazySpecializedFunction.apply else None)
self.concrete_functions = {} # config -> callable map
self._tuner = self.get_tuning_driver()
self.sub_dir = sub_dir or \
self.NameExtractor().visit(self.original_tree) or \
hex(hash(self))[2:]
self.backend_name = backend_name
@property
def original_tree(self):
return copy.deepcopy(self._original_tree)
@property
def tree(self):
return self._original_tree
@original_tree.setter
def original_tree(self, value):
if not hasattr(self, '_original_tree'):
self._original_tree = value
        elif ast.dump(self._original_tree, True, True) != \
                ast.dump(value, True, True):
raise AttributeError('Cannot redefine the ast')
@property
def info_filename(self):
return 'info.json'
def get_info(self, path):
info_filepath = os.path.join(path, self.info_filename)
if not os.path.exists(info_filepath):
return {'hash': None, 'files': []}
with open(info_filepath) as info_file:
return json.load(info_file)
def set_info(self, path, dictionary):
info_filepath = os.path.join(path, self.info_filename)
with open(info_filepath, 'w') as info_file:
return json.dump(dictionary, info_file)
@staticmethod
def _hash(o):
if isinstance(o, dict) and type(o).__hash__ is dict.__hash__:
return hash(frozenset(
LazySpecializedFunction._hash(item) for item in o.items()
))
else:
try:
return hash(o)
except TypeError:
return hash(str(o))
def __hash__(self):
# mro = type(self).mro()
# result = hashlib.sha512(''.encode())
# for klass in mro:
# if issubclass(klass, LazySpecializedFunction):
# try:
# result.update(inspect.getsource(klass).encode())
# except IOError:
# # means source can't be found. Well, can't do anything
# # about that I don't think
# pass
# else:
# pass
# if self.original_tree is not None:
# tree_str = ast.dump(self.original_tree,
# annotate_fields=True, include_attributes=True)
# result.update(tree_str.encode())
# return int(result.hexdigest(), 16)
if self._hash_cache is not None:
return self._hash_cache
try:
self_hash = hash(inspect.getsource(type(self)).encode())
except TypeError:
self_hash = 1
#self_hash = 1
tree_hash = hash(dump(self._original_tree, annotate_fields=True, include_attributes=True))
self._hash_cache = self_hash * tree_hash
return self._hash_cache
def config_to_dirname(self, program_config):
"""Returns the subdirectory name under .compiled/funcname"""
# fixes the directory names and squishes invalid chars
regex_filter = re.compile(r"""[/\?%*:|"<>()'{} -]""")
def deep_getattr(obj, s):
parts = s.split('.')
for part in parts:
obj = getattr(obj, part)
return obj
path_parts = [
self.sub_dir,
str(type(self)),
str(self._hash(program_config.args_subconfig)),
str(self._hash(program_config.tuner_subconfig))
]
for attrib in self._directory_fields:
path_parts.append(str(deep_getattr(self, attrib)))
filtered_parts = [
str(re.sub(regex_filter, '_', part)) for part in path_parts]
compile_path = str(ctree.CONFIG.get('jit', 'COMPILE_PATH'))
path = os.path.join(compile_path, *filtered_parts)
return re.sub('_+', '_', path)
def get_program_config(self, args, kwargs):
# Don't break old specializers that don't support kwargs
try:
args_subconfig = self.args_to_subconfig(args, kwargs)
except TypeError:
args_subconfig = self.args_to_subconfig(args)
try:
self._tuner.configs.send((args, args_subconfig))
        except TypeError:
            # can't send into an unstarted generator
            pass
tuner_subconfig = next(self._tuner.configs)
log.info("tuner subconfig: %s", tuner_subconfig)
log.info("arguments subconfig: %s", args_subconfig)
return self.ProgramConfig(args_subconfig, tuner_subconfig)
def get_transform_result(self, program_config, dir_name, cache=True):
info = self.get_info(dir_name)
# check to see if the necessary code is in the persistent cache
        if (hash(self) != info['hash'] and self.original_tree is not None) \
                or not cache:
# need to run transform() for code generation
log.info('Hash miss. Running Transform')
ctree.STATS.log("Filesystem cache miss")
transform_result = self.run_transform(program_config)
# Saving files to cache directory
for source_file in transform_result:
assert isinstance(source_file, File), \
"Transform must return an iterable of Files"
source_file.path = dir_name
new_info = {'hash': hash(self),
'files': [os.path.join(f.path, f.get_filename())
for f in transform_result]}
self.set_info(dir_name, new_info)
else:
log.info('Hash hit. Skipping transform')
ctree.STATS.log('Filesystem cache hit')
files = [getFile(path) for path in info['files']]
transform_result = files
return transform_result
def __call__(self, *args, **kwargs):
"""
Determines the program_configuration to be run. If it has yet to be
built, build it. Then, execute it. If the selected
program_configuration for this function has already been code
generated for, this method draws from the cache.
"""
ctree.STATS.log("specialized function call")
log.info("detected specialized function call with arg types: %s",
[type(a) for a in args] +
[type(kwargs[key]) for key in kwargs])
program_config = self.get_program_config(args, kwargs)
dir_name = self.config_to_dirname(program_config)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
config_hash = dir_name
# checks to see if the necessary code is in the run-time cache
if ctree.CONFIG.getboolean('jit', 'CACHE') and \
config_hash in self.concrete_functions:
ctree.STATS.log("specialized function cache hit")
log.info("specialized function cache hit!")
csf = self.concrete_functions[config_hash]
else:
ctree.STATS.log("specialized function cache miss")
log.info("specialized function cache miss.")
transform_result = self.get_transform_result(
program_config, dir_name)
csf = self.finalize(transform_result, program_config)
            assert isinstance(csf, ConcreteSpecializedFunction), \
                "Expected a ctree.jit.ConcreteSpecializedFunction, " \
                "but got a %s." % type(csf)
self.concrete_functions[config_hash] = csf
return csf(*args, **kwargs)
def run_transform(self, program_config):
transform_result = self.transform(
self.original_tree,
program_config
)
if not isinstance(transform_result, (tuple, list)):
transform_result = (transform_result,)
transform_result = [DeclarationFiller().visit(source_file)
if isinstance(source_file, CFile) else source_file
for source_file in transform_result]
return transform_result
@classmethod
def from_function(cls, func, folder_name=''):
class Replacer(ast.NodeTransformer):
def visit_Module(self, node):
return MultiNode(body=[self.visit(i) for i in node.body])
def visit_FunctionDef(self, node):
if node.name == func.__name__:
node.name = 'apply'
node.body = [self.visit(item) for item in node.body]
return node
def visit_Name(self, node):
if node.id == func.__name__:
node.id = 'apply'
return node
func_ast = Replacer().visit(get_ast(func))
return cls(py_ast=func_ast, sub_dir=folder_name or func.__name__)
def report(self, *args, **kwargs):
"""
Records the performance of the most recent configuration.
"""
return self._tuner.report(*args, **kwargs)
# =====================================================
# Methods to be overridden by the user
def transform(self, tree, program_config):
"""
Convert the AST 'tree' into a C AST, optionally taking advantage of the
actual runtime arguments.
"""
raise NotImplementedError()
def finalize(self, transform_result, program_config):
"""
This function will be passed the result of transform. The specializer
should return an ConcreteSpecializedFunction.
"""
raise NotImplementedError("Finalize must be implemented")
def get_tuning_driver(self):
"""
Define the space of possible implementations.
"""
from ctree.tune import ConstantTuningDriver
return ConstantTuningDriver('')
def args_to_subconfig(self, args):
"""
Extract features from the arguments to define uniqueness of
this particular invocation. The return value must be a hashable
object, or a dictionary of hashable objects.
"""
log.warn("arguments will not influence program_config. " +
"Consider overriding args_to_subconfig() in %s.",
type(self).__name__)
return {}
@staticmethod
def apply(*args):
raise NotImplementedError()
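# Minimal subclass sketch (illustrative; not shipped with ctree): the hooks
# a specializer typically overrides. The bodies below are stubs --
# transform() should return an iterable of File nodes and finalize() a
# ConcreteSpecializedFunction wrapping the compiled entry point.
class _ExampleSpecializer(LazySpecializedFunction):
    def args_to_subconfig(self, args):
        # cache one compiled variant per combination of argument types
        return {'arg_types': tuple(type(a).__name__ for a in args)}

    def transform(self, tree, program_config):
        raise NotImplementedError("lower `tree` to a CFile here")

    def finalize(self, transform_result, program_config):
        raise NotImplementedError("compile and wrap the entry point here")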
|
ucb-sejits/ctree
|
ctree/jit.py
|
Python
|
bsd-2-clause
| 15,340
|
[
"VisIt"
] |
8ebe3caea1515a7a2ba1895b88552a6824a68b3965662d2825466e7054a508cd
|
import pyfits as fits
import numpy as np
from scipy.optimize import curve_fit, leastsq
import sys
import getopt
import os
import time
import shutil
'''
Last changed:
July 29, 2014 - Morten Stostad - Improved readability
Gemini NIFS data reduction script - PYRAF version
Reduction for: GENERAL BASELINE CALIBRATIONS
DESCRIPTION
This script is a pyraf-compatible version of the original NIFS scripts
written by Tracy Beck. Original pyraf-compatible code by R. McDermid, with
further modifications by T. Do and M. Stostad.
This is a module written to reduce all NIFS-data, put together by combining
three different modules by R. McDermid and T. Beck (Step 2, 3 and 4) with
improved telluric correction (Step 3.2). The following is the minimum needed
to be able to reduce data:
Science files taken on the night of observation
Calibration files taken on the night of observation
Text files of arc lines (or telluric lines if lamp calib files unavailable)
Template of a typical A-star (e.g. Vega)
Lists of all the calibration files (e.g. skylist, arclist, etc)
Pyraf, numpy, pyfits and scipy installed
There are four major steps in the reduction procedure.
1. Creating separate files that contain lists of the files; there
should be one skylist that has a list of all the sky files, one
arclist that has all the arc files, etc. THIS STEP IS NOT IN THE PIPELINE.
Use mkNifsFilesList.py to create most of them - some have to be created
   manually, however.
2. Preparing the base calibration files (ronchi flats, bad pixel mask,
wavelength solutions, etc). Yields five files; a shift reference
file, a flat field, a flat BPM, an arc frame and a ronchi flat.
3. Preparing for telluric correction. Yields a telluric correction fits file.
4. Reducing the science data by using the files created in 1) and 2).
The following are the only parameters the user should have to change.
Of course, it doesn't hurt to understand the rest of the code, but
ideally the program should work fine by just changing these parameters
and running run_reduction() in the python shell.
DISCLAIMER: This is a very basic Python pipeline, and it is not
particularly well made. We publish it mainly because the previous
pipeline from the Gemini website had obvious flaws, and could not
be used in Python without a lot of extra work. Anyone that uses
this pipeline should be aware that although it should work for
general reduction of NIFS data, its main purpose was reduction of
the data from Stostad+ 2014, and has only been tested on that data.
'''
#Change a value to False if the step should be skipped. The steps are
#ordered [Prepare base calibration files, prepare telluric file,
#reduce science data]. For instance, if the telluric correction file
#is already acquired, change to [True, False, True].
steps = [True, True, True]
# The folders for the calibration and science files, and the folder for the reduced
# files. (raw_data and red_data are just the same variables in step 3&4).
# dat_dir = name of top folder
# raw_dir = location of calibration files
# reduce_dir = backup location
# reduced_all = folder for reduced files
datDir = "/Users/username/nifs/date/"
raw_dir = datDir+"calib/"
reduce_dir= datDir+"backup/"
reduced_all = datDir + 'reduced/'
raw_data = raw_dir
red_data = reduce_dir
# No parameters for step 1; the user has to create the filelists themselves.
# If the script mkNifsFilesList.py is available, the user could use that.
#################### PARAMETERS FOR STEP 2 ####################
# Folders of the raw calibration data, the output folder for reduced files and
# the folder where the lamp reference files are. These are the only parameters
# every user will have to change for step 2. The backup directory for the
# finished calibration files can also be changed.
arcDir = "/Users/username/nifs/arclamps/data/" # lamp reference files folder
backupDir = reduce_dir+'calib_backup/' # another backup folder
useSkyLines = False # set to True if no lamp calibrations were taken and
# telluric lines should be used for wavelength calibration.
# In almost all cases this should be False.
# It's assumed that the list of files (e.g. flatlist) are already in
# datDir/calib/ from Step 1.
# Set the file names for the lists of the calibration file names.
# These are the names of the list created in Step 1.
flatlist = "flatlist"
flatdarklist = "flatdarklist"
ronchilist = "ronchilist"
if useSkyLines:
arcStr = 'skylist'
arcStrDrk = 'skydarklist'
else:
arcStr = 'arclist'
arcStrDrk = 'arcdarklist'
# Change steps_basec if not all the substeps of the base calibration should be run
# [find first shift file, make flats, find wavelength solution, make ronchi flat]
steps_basec = [True,True,True,True]
#################### PARAMETERS FOR STEP 3 ####################
# The spectral resolution of the science data spectra.
R = 5000
# These are the file names of processed calibration files, which were created by
# step 2. If they're not manually modified, simply put ManualNames = False,
# and you won't have to worry about them. If you have changed the names since
# step 2 created them, put ManualNames = True and change the file names here.
ManualNames = False
# calflat = "flat"
# bpm = "flat_bpm.pl"
# arc = "wsoln"
# ronchiflat = "rflat"
# shiftimage = "shiftFile"
# Specify the name of the text file containing the name of the A star
# fits file and the corresponding sky file.
#
# Example:
# tellistfile = 'astar_hd155379'
# astarskylistfile = 'astar_hd155379_sky'
tellistfile = ""
astarskylistfile = ""
# The high-resolution stellar template:
fits_template = '/Users/username/nifs/templates/vega_all.fits'
# The output name of the final telluric file:
telluric_norm = 'telluric_norm_weighted.fits'
#################### PARAMETERS FOR STEP 4 ####################
rmReducedData = False
# Same as above. Don't worry about the names (they will be
# overwritten) if ManualNames = False.
ManualNames = False
calflat = "flat"
arc = "wrgnN20120505S0384"
ronchiflat = "rgnN20120505S0542"
shiftimage = "shiftFile"
bpm = "flat_bpm.pl"
# Name of the text files with the list of science/sky filenames.
scilistfile = 'sciencename'
skylistfile = 'skylist'
# optionally set to use a specific sky file (if not set to None).
# otherwise, will combine all the files in the skyfile list into 1 and
# use that sky. This is in contrast to the procedure in the original
# script that requires one sky for every science frame.
useSkyFile = None
telluric = telluric_norm
# END EDITING HERE
#--------------------------------------------------------------------------
#STEP 1:
#The user must create the lists of calibration files outside this script.
######################################################################
######################################################################
######################################################################
# STEP 2:
# PREPARING BASE CALIBRATION FILES
#
# To do the main part of the calibration in step 4 we must create certain
# calibration files. There are five of these:
#
# flat.fits
# flat_bpm.pl
# wsoln.fits
# rflat.fits
# shiftFile.fits
#
# The following code was written by R. McDermid. It should create all
# these files in the current directory.
######################################################################
# Gemini NIFS data reduction script - PYRAF version
# Reduction for: GENERAL BASELINE CALIBRATIONS
#
###########################################################################
# DESCRIPTION #
# #
# This script is a pyraf-compatible version of the original NIFS scripts #
# written by Tracy Beck. #
# #
# #
###########################################################################
# Current limitations: The NIFS Baseline calibration reductions have #
# not been well tested on data that was obtained with non-standard #
# wavelength configurations. (i.e., a different central wavelength #
# setting than the default Z, J, H and K-band values of 1.05, 1.25, 1.65 #
# and 2.20 microns). #
# #
###########################################################################
###########################################################################
# STEP 2.1: Prepare IRAF #
###########################################################################
# Import the pyraf module and relevant packages
from pyraf import iraf
def prep_nifs(log):
iraf.gemini()
iraf.nifs()
iraf.gnirs()
iraf.gemtools()
# Unlearn the used tasks
iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs)
iraf.set(stdimage='imt2048')
# Prepare the package for NIFS
iraf.nsheaders("nifs",logfile=log)
# Set clobber to 'yes' for the script. This still does not make the gemini tasks
# overwrite files, so you will likely have to remove files if you re-run the script.
    global user_clobber  # read back by nifs_finish() below
    user_clobber = iraf.envget("clobber")
iraf.reset(clobber='yes')
###########################################################################
# STEP 2.2: Determine the shift to the MDF file #
###########################################################################
def mk_mdf_shift(infile,raw_dir,log,outpref="s",overwrite=True):
outfile = outpref+infile
if os.path.exists(outfile) & overwrite:
print '% mk_mdf_shift: file exists, will now overwrite: '+outfile
os.remove(outfile)
# find the MDF shift, by default prefixes the file with an "s"
print '% NIFS_Basecalib: Determine the shift to the MDF file:'
iraf.nfprepare(infile,rawpath=raw_dir,outpref=outpref, shiftx='INDEF',
shifty='INDEF',fl_vardq='no',fl_corr='no',fl_nonl='no', logfile=log)
# return the name of the shiftfile
return outfile
###########################################################################
# STEP 2.3: Make the Flat field and BPM #
###########################################################################
def mk_flat(flatlist,flatdarklist,raw_dir,shift_image,log):
print '% NIFS_Basecalib: Make the flat field and BPM:'
    # nfprepare preps the data by validating it, reading the array
    # data, checking saturation and linearity limits, optionally
    # subtracting reference pixels, correcting for non-linearity (not on by
    # default), calculating the variance and data quality, and
    # detecting cosmic rays (not on by default).
#
    # Importantly, nfprepare also adds in the shifts from flexure (MDF
# shifts) to the data. In this case, it was calculated using one file
# above and used for the rest here as 'shiftim'.
# note: the keyword 'fl_cut' in nsreduce was only recently changed
# in Dec. 2012 from 'fl_nscut', so there may be bugs with this
# keyword depending on which version of the Gemini IRAF routines
# you are using
calflat=str(open(flatlist, "r").readlines()[0]).strip()
flatdark=str(open(flatdarklist, "r").readlines()[0]).strip()
iraf.nfprepare("@"+flatlist,rawpath=raw_dir,shiftim=shift_image,
fl_vardq='yes',fl_int='yes',fl_corr='no',fl_nonl='no', logfile=log)
iraf.nfprepare("@"+flatdarklist,rawpath=raw_dir,shiftim=shift_image,
fl_vardq='yes',fl_int='yes',fl_corr='no',fl_nonl='no', logfile=log)
# gemcombine combines multiple frames into one
iraf.gemcombine("n//@"+flatlist,output="gn"+calflat,fl_dqpr='yes',
fl_vardq='yes',masktype="none",logfile=log)
iraf.gemcombine("n//@"+flatdarklist,output="gn"+flatdark,fl_dqpr='yes',
fl_vardq='yes',masktype="none",logfile=log,combine="median")
# nsreduce does sky subtraction and flattens the spectroscopic data
iraf.nsreduce("gn"+calflat,fl_cut='yes',fl_nsappw='yes',fl_vardq='yes',
fl_sky='no',fl_dark='no',fl_flat='no',logfile=log,outprefix='r')
iraf.nsreduce("gn"+flatdark,fl_cut='yes',fl_nsappw='yes',fl_vardq='yes',
fl_sky='no',fl_dark='no',fl_flat='no',logfile=log,outprefix='r')
    # creating flat image, final name = rgnN....._sflat.fits
print '% NIFS_Basecalib: creating flat image, final name = rgnN....._sflat.fits'
iraf.nsflat("rgn"+calflat,darks="gn"+os.path.splitext(flatdark)[0],
flatfile="rgn"+os.path.splitext(calflat)[0]+"_sflat",
darkfile="rgn"+os.path.splitext(flatdark)[0]+"_dark",
fl_save_dark='yes',process="fit",
thr_flo=0.15,thr_fup=1.55,fl_vardq='yes',logfile=log)
#rectify the flat for slit function differences - make the final flat.
print '% NIFS_Basecalib: rectify the flat for slit function differences - make the final flat.'
iraf.nsslitfunction("rgn"+calflat,"rgn"+os.path.splitext(calflat)[0]+"_flat",
flat="rgn"+os.path.splitext(calflat)[0]+"_sflat",
dark="rgn"+os.path.splitext(flatdark)[0]+"_dark",
combine="median",
order=3,fl_vary='no',logfile=log)
# return [flat file, dark for the flat, bad pixel mask]
return ("rgn"+os.path.splitext(calflat)[0]+"_flat","rgn"+os.path.splitext(flatdark)[0]+"_dark",
"rgn"+os.path.splitext(calflat)[0]+"_sflat_bpm.pl")
###########################################################################
# STEP 2.4: Reduce the Arc and determine the wavelength solution #
###########################################################################
def mk_wavelength_solution(arcStr,arcStrDrk,raw_dir,shift_image,
flat_image,bpm,log,arcDir,useSkyLines=False):
arc=str(open(arcStr, "r").readlines()[0]).strip()
arcdark=str(open(arcStrDrk, "r").readlines()[0]).strip()
iraf.nfprepare('@'+arcStr, rawpath=raw_dir, shiftimage=shift_image,
bpm=bpm,fl_vardq='yes',
fl_corr='no',fl_nonl='no', logfile=log)
iraf.nfprepare('@'+arcStrDrk, rawpath=raw_dir, shiftimage=shift_image,
bpm=bpm,fl_vardq='yes',
fl_corr='no',fl_nonl='no', logfile=log)
# Determine the number of input arcs and arc darks so that the
# routine runs automatically for single or multiple files.
nfiles = len(open(arcStr).readlines())
if nfiles > 1:
iraf.gemcombine("n//@"+arcStr,output="gn"+os.path.splitext(arc)[0],
fl_dqpr='yes',fl_vardq='yes',masktype="none",logfile=log)
else:
iraf.copy("n"+os.path.splitext(arc)[0]+".fits",
"gn"+os.path.splitext(arc)[0]+".fits")
nfiles = len(open(arcStrDrk).readlines())
if nfiles > 1:
iraf.gemcombine("n//@"+arcStrDrk,output="gn"+os.path.splitext(arcdark)[0],
fl_dqpr='yes',fl_vardq='yes',masktype="none",logfile=log)
else:
iraf.copy("n"+os.path.splitext(arcdark)[0]+".fits",
"gn"+os.path.splitext(arcdark)[0]+".fits")
iraf.nsreduce("gn"+arc,outpr="r",darki="gn"+os.path.splitext(arcdark)[0],
flati=flat_image,
fl_vardq='no', fl_cut='yes', fl_nsappw='yes', fl_sky='no',
fl_dark='yes',fl_flat='yes',
logfile=log)
###########################################################################
# DATA REDUCTION HINT - #
# For the nswavelength call, the different wavelength settings #
    # use different values for some of the parameters. For optimal auto #
# results, use: #
# #
# K-band: thresho=50.0, cradius=8.0 --> (gives rms of 0.1 to 0.3) #
# H-band: thresho=100.0, cradius=8.0 --> (gives rms of 0.05 to 0.15) #
# J-band: thresho=100.0 --> (gives rms of 0.03 to 0.09) #
# Z-band: Currently not working very well for non-interactive mode #
# #
# Note that better RMS fits can be obtained by running the wavelength #
# calibration interactively and identifying all of the lines #
# manually. Tedious, but will give more accurate results than the #
    # automatic mode (i.e., fl_inter-). Use fl_inter+ for manual mode. #
# #
###########################################################################
# Determine the wavelength of the observation and set the arc coordinate
# file. If the user wishes to change the coordinate file to a different
# one, they need only to change the "clist" variable to their line list
# in the coordli= parameter in the nswavelength call.
hdulist = fits.open("rgn"+os.path.splitext(arc)[0]+".fits")
band = hdulist[0].header['GRATING'][0:1]
if useSkyLines:
# optionally use the sky lines instead of lamps
clist=arcDir+"ohlines.dat"
my_thresh=50
nfound = 5
nlost = 1
else:
nfound = 10
nlost = 10
if band == "Z":
clist=arcDir+"ArXe_Z.dat"
my_thresh=100.0
elif band == "K":
clist=arcDir+"ArXe_K.dat"
my_thresh=50.0
else:
clist=arcDir+"argon.dat"
my_thresh=100.0
iraf.nswavelength("rgn"+arc, coordli=clist, nsum=10, thresho=my_thresh,
trace='yes',fwidth=2.0,match=-6,cradius=8.0,fl_inter='no',nfound=nfound,
nlost=nlost, logfile=log)
# return the name of the file with the wavelength solution
return 'wrgn'+arc
###########################################################################
# STEP 2.5: #
# Trace the spatial curvature and spectral distortion in the Ronchi flat #
###########################################################################
def mk_ronchi_flat(ronchilist,raw_dir,shift_image,flat_image,flat_dark,bpm,log):
ronchiflat=str(open(ronchilist, "r").readlines()[0]).strip()
iraf.nfprepare("@"+ronchilist,rawpath=raw_dir, shiftimage=shift_image,
bpm=bpm,
fl_vardq='yes',fl_corr='no',fl_nonl='no',logfile=log)
# Determine the number of input Ronchi calibration mask files so that
# the routine runs automatically for single or multiple files.
nfiles = len(open(ronchilist).readlines())
if nfiles > 1:
iraf.gemcombine("n//@"+ronchilist,output="gn"+ronchiflat,fl_dqpr='yes',
masktype="none",fl_vardq='yes',logfile=log)
else:
iraf.copy("n"+ronchiflat+".fits","gn"+ronchiflat+".fits")
iraf.nsreduce("gn"+ronchiflat, outpref="r", dark=flat_dark,
flatimage=flat_image,
fl_cut='yes', fl_nsappw='yes',
fl_flat='yes', fl_sky='no', fl_dark='yes', fl_vardq='no',
logfile=log)
iraf.nfsdist("rgn"+ronchiflat,
fwidth=6.0, cradius=8.0, glshift=2.8,
minsep=6.5, thresh=2000.0, nlost=3,
fl_inter='no',logfile=log)
return "rgn"+ronchiflat
## ###########################################################################
## # Reset to user defaults #
## ###########################################################################
def nifs_finish():
if user_clobber == "no":
iraf.set(clobber='no')
def run_basecalib(datDir, raw_dir, reduce_dir, backupDir, arcDir, useSkyLines, flatlist,\
flatdarklist, ronchilist, arcStr, arcStrDrk, steps_basec):
flatfile1=str(open(flatlist, "r").readlines()[0]).strip()
flatdark1=str(open(flatdarklist, "r").readlines()[0]).strip()
ronchifile1=str(open(ronchilist, "r").readlines()[0]).strip()
arcfile1=str(open(arcStr, "r").readlines()[0]).strip()
# Create a log file and back up the previous one if it already exists
log = 'Basecalib.log'
if os.path.exists(log):
t = time.localtime()
app = "_"+str(t[0])+str(t[1]).zfill(2)+str(t[2]).zfill(2)+'_'+ \
str(t[3]).zfill(2)+':'+str(t[4]).zfill(2)+':'+str(t[5]).zfill(2)
shutil.move(log,log+app)
# Reduce data in sequence
prep_nifs(log) # reset things
# find the shift from the first file
if steps_basec[0]:
print 'Using first file to find the initial shift: '+flatfile1
shift_image = mk_mdf_shift(flatfile1,raw_dir,log)
else:
shift_image = "s"+flatfile1
# make the flat files
if steps_basec[1]:
flat_image, flat_dark, bpm = mk_flat(flatlist,flatdarklist,raw_dir,shift_image,log)
else:
flat_image = "rgn"+os.path.splitext(flatfile1)[0]+"_flat.fits"
bpm = "rgn"+os.path.splitext(flatfile1)[0]+"_sflat_bpm.pl"
flat_dark = "rgn"+os.path.splitext(flatdark1)[0]+"_dark.fits"
# find the wavelength solution
if steps_basec[2]:
wave_file = mk_wavelength_solution(arcStr,arcStrDrk,raw_dir,shift_image,
flat_image,bpm,log,
useSkyLines=useSkyLines,
arcDir=arcDir)
else:
wave_file = 'wrgn'+arcfile1
# make the ronchi flats
if steps_basec[3]:
ronchi_file = mk_ronchi_flat(ronchilist,raw_dir,shift_image,flat_image,
flat_dark,bpm,log)
else:
ronchi_file = "rgn"+ronchifile1
# make the backup directory for the calibration files, in case
# they get erased.
if not(os.path.isdir(reduce_dir)):
print "Directory not found, making directory:"+reduce_dir
os.mkdir(reduce_dir)
if not(os.path.isdir(backupDir)):
print "Directory not found, making directory:"+backupDir
os.mkdir(backupDir)
# copy the reduced files
print 'backing up ronchi flat: '+ronchi_file
shutil.copyfile(ronchi_file,reduce_dir+ronchi_file) # ronchi flat
shutil.copyfile(ronchi_file,backupDir+ronchi_file)
# shutil.copyfile("wrgn"+arc,reduce_dir+'wavelength_solution.fits')
print 'backing up wavelength solution: '+wave_file
shutil.copyfile(wave_file,reduce_dir+wave_file)
shutil.copyfile(wave_file,backupDir+wave_file)
# delete the old database if it's there, then insert the new one
try:
shutil.copytree('database',reduce_dir+'database')
except OSError:
shutil.rmtree(reduce_dir+'database')
shutil.copytree('database',reduce_dir+'database')
try:
shutil.copytree('database',backupDir+'database')
except OSError:
shutil.rmtree(backupDir+'database')
shutil.copytree('database',backupDir+'database')
# bad pixel map
print 'backing up bad pixel mask: '+'flat_bpm.pl'
shutil.copyfile(bpm,reduce_dir+'flat_bpm.pl')
shutil.copyfile(bpm,backupDir+'flat_bpm.pl')
# flat
print 'backing up flat: '+flat_image+'.fits'
shutil.copyfile(flat_image+'.fits',reduce_dir+'flat.fits')
shutil.copyfile(flat_image+'.fits',backupDir+'flat.fits')
# shift file
print "backing up shift file: shiftFile.fits"
shutil.copyfile(shift_image,reduce_dir+'shiftFile.fits')
shutil.copyfile(shift_image,backupDir+'shiftFile.fits')
# copy the names of the files out into a file
fileTypes = ['ronchiflat','badPixelMask','flat','shiftImage','waveSolution']
fileTypeNames = [ronchi_file,bpm,flat_image,shift_image,wave_file]
newfileTypeNames = [ronchi_file,'flat_bpm.pl','flat.fits','shiftFile.fits',wave_file]
output = open(reduce_dir+'calibfiles','w')
for ii in np.arange(len(fileTypes)):
output.write('%s \t %s \t %s\n' % (fileTypes[ii],fileTypeNames[ii],\
newfileTypeNames[ii]))
output.close()
## ###########################################################################
## # End of the Baseline Calibration reduction #
## ###########################################################################
## # #
## # The final output files created from this script for later science #
## # reduction have prefixes and file names of: #
## # 1. Shift reference file: "s"+calflat #
## # 2. Flat field: "rn"+calflat+"_flat" #
## # 3. Flat BPM (for DQ plane generation): "rn"+calflat+"_flat_bpm.pl" #
## # 4. Wavelength referenced Arc: "wrgn"+arc #
## # 5. Spatially referenced Ronchi Flat: "rgn"+ronchiflat #
## # 6. A database for some info on the ronchi flat and arc: "database" #
## # For this reduction, #
## # Shift ref. file = sN20060210S0195.fits #
## # Flat field = rgnN20060210S0195_flat.fits #
## # Flat BPM = rgnN20060210S0195_sflat_bpm.pl #
## # Arc frame = wrgnN20060210S0191.fits #
## # Ronchi flat = rgnN20060210S0389.fits #
## # database = database (a folder) #
## # #
## # Because of the shutil calls at the end of the script, these required #
## # files are also copied to reduce_dir with new (more convenient) names. #
## # These names are: #
## # #
## # Shift ref. file = shiftFile #
## # Flat field = flat #
## # Flat BPM = flat_bpm.pl #
## # Arc frame = wrgnN20060210S0191.fits #
## # Ronchi flat = rgnN20060210S0389.fits #
## # database = database #
## # #
## # The arc frame and ronchi flat have to have the same names as the #
## # database references them with their original names. #
## # #
## # A file with the name "calibfiles" is also written to reduce_dir, so #
## # the program can find the names of the different calibration files. #
## # #
## # These files are all the new files you need after having finished the #
## # base calibration. #
## # #
## ###########################################################################
######################################################################
######################################################################
######################################################################
#STEP 3:
#PREPARING FOR TELLURIC CORRECTION
#
#To do the telluric correction in step 4 we must have a normalized telluric
#absorption spectrum. To get this we need to do the following:
#
#1. Combining all the different exposures of the A-star taken on the
# night of observation to find a spectrum for the A-star observed.
# (code originally written by R.McDermid, slightly modified)
#2. Changing the stellar template (in our case Vega, but other templates
# can just as easily be used) into a usable form. There are three
# things needed to be done.
# 2a) Convolving the much higher-resolution template data to be
# smooth enough for comparison to the A-star observation data.
# This step also normalizes the template data.
# 2b) Fixing for any velocity the A-star in observation might have.
# 2c) Do a linear interpolation to force the data points from the
# stellar template to be at the same wavelengths as the A-star
# measurements.
#3. Dividing the observation A-star spectrum by the fixed template
# spectrum, then normalizing again.
#
#####################################################################
###### STEP 3.1 (Creating A-star spectrum) ######
# Some gaussian fitters have to be defined for later use:
def gauss_function(x, offset, a, x0, sigma):
return offset+a*np.exp(-(x-x0)**2/(2*sigma**2))
def test_fit(k, g):
'''A simple 1d gaussian fitter.
'''
m = np.argmax(g)
center = k[m]
maxPt = np.max(g)
dx = k[1]-k[0]
integral = (g*dx).sum()
if maxPt != 0.0:
sigma = integral/maxPt/2.0
else:
sigma = 1
popt, pcov = curve_fit(gauss_function, k, g, p0 = \
[np.min(g), maxPt, center, sigma])
return popt
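# Illustrative check (not part of the pipeline): test_fit recovering the
# parameters of a synthetic gaussian; a quick sanity test before trusting
# it on real Brackett-gamma profiles.
def _example_test_fit():
    x = np.linspace(-5, 5, 200)
    y = gauss_function(x, 0.1, 2.0, 0.5, 0.8)  # offset, a, x0, sigma
    return test_fit(x, y)  # ~ [0.1, 2.0, 0.5, 0.8]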
def gaussian(height, center_x, center_y, width_x, width_y):
"""Returns a 2d gaussian function with the given parameters"""
width_x = float(width_x)
width_y = float(width_y)
return lambda x,y: height*np.exp(
-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
def moments(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments """
total = data.sum()
X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
row = data[int(x), :]
width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width_x, width_y
def fitgaussian(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit"""
params = moments(data)
errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -
data)
p, success = leastsq(errorfunction, params)
return p
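# Illustrative check (not part of the pipeline): fitgaussian recovering the
# parameters of a synthetic 2d gaussian, the same way the A star is located
# in the datacubes further below.
def _example_fitgaussian():
    X, Y = np.indices((40, 40))
    data = gaussian(3.0, 20.0, 22.0, 2.5, 3.5)(X, Y)
    return fitgaussian(data)  # ~ (3.0, 20.0, 22.0, 2.5, 3.5)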
# Finished defining the gaussian fitters.
def astar_spec(raw_data, red_data, ManualNames, tellistfile, astarskylistfile):
###########################################################################
# Gemini NIFS data reduction script
# Reduction for: TELLURIC STANDARD CALIBRATIONS
#
# DESCRIPTION:
#
# This script is a pyraf-compatible version of the original NIFS scripts
# written by Tracy Beck.
#
# Notes:
# - A sky frame is constructed by median combining sky frames. Editing
# the way the sky subtraction is done should be easy if telluric data
# were obtained by offsetting to the sky (In this case, just look at the
# way the NIFS Science reduction is constructed).
#
#
#
# VERSION:
# 1.0: Adapted from original cl scripts by Tracy Beck. R.McDermid, 05Sep2012
# 2.0: Modified to be automatic with use of a template A-star, step 3.2.
# M.Stostad, July2013
# 3.0: Added to rest of pipeline. M.Stostad, July2013
# AUTHOR: R.McDermid (rmcdermid@gemini.edu)
# Modified by: M.Stostad (morten.stostad@mail.utoronto.ca)
###########################################################################
###########################################################################
# STEP 3.1.1: Prepare IRAF #
###########################################################################
# Import the pyraf module and relevant packages
from pyraf import iraf
iraf.gemini()
iraf.nifs()
iraf.gnirs()
iraf.gemtools()
import pyfits as fits
# Unlearn the used tasks
iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs)
# Create a log file and back up the previous one if it already exists
log = 'Telluric.log'
if os.path.exists(log):
t = time.localtime()
app = "_"+str(t[0])+str(t[1]).zfill(2)+str(t[2]).zfill(2)+'_'+ \
str(t[3]).zfill(2)+':'+str(t[4]).zfill(2)+':'+str(t[5]).zfill(2)
shutil.move(log,log+app)
iraf.set(stdimage='imt2048')
# Prepare the package for NIFS
iraf.nsheaders("nifs",logfile=log)
# Set clobber to 'yes' for the script. This still does not make the gemini tasks
# overwrite files, so you will likely have to remove files if you re-run the script.
user_clobber=iraf.envget("clobber")
iraf.reset(clobber='yes')
# Use the first telluric frame as the base name for the combined telluric spectrum
telluric=str(open(tellistfile, "r").readlines()[0]).strip()
###########################################################################
# STEP 3.1.2: Reduce the Telluric Standard #
###########################################################################
# Get the names of the base calibration files created in step 2 (assuming the
# file calibfiles was created as it should have been):
    if not ManualNames:
calib_names = open(red_data+'calibfiles','r')
ronchiflat = calib_names.readline().split()[2].strip() #'rgn'+"N20..."+'.fits'
bpm = calib_names.readline().split()[2].strip() #'flat_bpm.pl'
calflat = calib_names.readline().split()[2].strip() #'flat'
shiftimage = calib_names.readline().split()[2].strip() #'shiftFile'
arc = calib_names.readline().split()[2].strip() #'wrgn'+"N20..."+'.fits'
# Prepare the data
iraf.nfprepare("@"+tellistfile,rawpath=raw_data,shiftim=red_data+shiftimage,
bpm=red_data+bpm,fl_vardq='yes',
fl_int='yes',fl_corr='no',fl_nonl='no')
astarskyfile = open(astarskylistfile)
astar_sky = astarskyfile.readline().strip()
iraf.nfprepare("@"+astarskylistfile,rawpath=raw_data,shiftim=red_data+shiftimage,
bpm=red_data+bpm,fl_vardq='yes',
fl_int='yes',fl_corr='no',fl_nonl='no')
# Make a median combined sky from the offset frames, which uses the telluric
# frame as the name, with _sky appended.
iraf.gemcombine("n//@"+astarskylistfile,output="g"+astar_sky,
fl_dqpr='yes',fl_vardq='yes',masktype="none",logfile=log)
# Do the sky subtraction on all the individual frames. Read the list (then
# get rid of stupid '\n' character returns) first.
telluriclist=open(tellistfile, "r").readlines()
telluriclist=[word.strip() for word in telluriclist]
for image in telluriclist:
iraf.gemarith ("n"+image, "-", "g"+astar_sky, "sn"+image, fl_vardq="yes",
logfile=log)
#reduce and flat field the data
iraf.nsreduce("sn@"+tellistfile,outpref="r",
flatim=red_data+calflat,
fl_cut='yes',fl_nsappw='yes',fl_vardq='yes',fl_sky='no',
fl_dark='no',fl_flat='yes',logfile=log)
#fix bad pixels from the DQ plane
iraf.nffixbad("rsn@"+tellistfile,outpref="b",logfile=log)
print "The arc file is", arc
print "The ronchi flat is", ronchiflat
#derive the 2D to 3D spatial/spectral transformation
iraf.nsfitcoords("brsn@"+tellistfile,outpref="f", fl_int='no',
lamptr=arc, sdisttr=ronchiflat,
logfile=log,lxorder=4,syorder=4)
#apply the transformation determined in the nffitcoords step
iraf.nstransform("fbrsn@"+tellistfile,outpref="t",logfile=log)
# make cubes
# Reformat the data into a 3-D datacube
iraf.nifcube ("tfbrsn@"+tellistfile, logfile=log)
# Extract 1D spectra from the 3D data:
outpref = 'gx'
summed_specs = None
telcubes = open(tellistfile, 'r').readlines()
# A spectrum has to be extracted for each data cube of the A star. This
# spectrum is extracted by fitting a 2d gaussian to the median flux close
# to the center of the spatial directions, and then taking the spectra of
# all the pixels within the FWHM of this 2d gaussian. Then a median
# background ring is subtracted to create the summed spectrum.
for file in telcubes:
file = "ctfbrsn" + file.strip()
tel_data = fits.getdata(file)
# Create the median brightness fits file:
brightness = np.zeros((tel_data.shape[1],tel_data.shape[2])) #stand-in
for i in range(tel_data.shape[2]):
for j in range(tel_data.shape[1]):
brightness[j,i] = np.median(tel_data[:,j,i]) #double loop over
#every pixel
hdulist = fits.open(file)
hdu = hdulist[1]
hdu.data = brightness
try:
hdulist.writeto('m'+file)
except IOError:
os.remove('m'+file)
hdulist.writeto('m'+file)
med_data = brightness
ylength = len(med_data[:,0])
xlength = len(med_data[0,:])
# Doing a 2d gaussian fit around the center of the image:
y = int(ylength/2)
x = int(xlength/2)
print "Finding position of the A star in file:", file
        g2d_range = int(y*4.0/5.0) # How many pixels should the radius of the 2d fitter be?
print "Fit between spatial coordinates:", \
(max(0, y - g2d_range), max(0, x - g2d_range)), \
(min(ylength, y + g2d_range), min(xlength, x + g2d_range))
fit = fitgaussian(med_data[max(0, y - g2d_range) : \
min(ylength, y + g2d_range), \
max(0, x - g2d_range): \
min(xlength, x + g2d_range)])
y = y - g2d_range + fit[1]
x = x - g2d_range + fit[2] # The fitter assumes the minimum point is (0,0).
r = 2.0*(fit[3] + fit[4])/2 # Using a rough avg width to find the "radius"
# of the star.
print "The star is at y =", y, "x =", x, ", and has a radius of", r, "pixels."
hdr = fits.getheader(file, 1)
spec_len = hdr['NAXIS3']
# Find the list of locations within the radius specified earlier, then
# extract the spectra from these locations and put them in spec_list.
star_locs = []
for j in range(int(y - r), int(y + r + 2)):
for i in range(int(x - r), int(x + r + 2)):
#range only uses ints, better with too many
#than with too few (some extra calculations, though)
distance = np.sqrt((j - y)**2 + (i - x)**2)
if (distance <= r) and j >= 0 and i >= 0:
star_locs.append([j, i])
spec_list = np.zeros((len(star_locs), spec_len))
indx = 0
for loc in star_locs:
if loc[0] < tel_data.shape[1] and loc[1] < tel_data.shape[2]:
spec_list[indx] = np.array(tel_data[:, loc[0], loc[1]])
indx += 1
rinner = r*1.5 # To find a median background spectrum from a ring
rout = r*2.0 # around the star we need an inner and outer radius
        # Now do the exact same as above, but with the background ring instead
# of the inner circle. This yields a list of background spectra which
# we can take the median background spectrum from.
bg_locs = []
for j in range(int(y - rout), int(y + rout + 2)):
for i in range(int(x - rout), int(x + rout + 2)):
distance = np.sqrt((j - y)**2 + (i - x)**2)
if (distance > rinner) and (distance <= rout) and j >= 0 and i >= 0:
bg_locs.append([j, i])
bg_locs = np.array(bg_locs)
bgspec_list = np.zeros((len(bg_locs), spec_len))
indx = 0
for loc in bg_locs:
bgspec_list[indx] = np.array(tel_data[:, loc[0], loc[1]])
# Sometimes the pipeline fails here if the fit of the
# A-star is bad, for whatever reason. If so you won't
# have to run the first step of the pipeline again,
# but you should check the A-star files to see why
# the 2d Gaussian fitter didn't work.
indx += 1
median = np.median(bgspec_list, axis = 0)
# Finally subtract the median background spectrum (multiplied by the number
# of spectra summed over when finding the summed star spectrum) from the
# summed star spectrum.
sum1 = np.sum(spec_list, 0) - median*len(spec_list)
sum1 = sum1 / np.median(sum1)
# summed_specs is the list of finished spectra found from this loop, one
# for each .fits star file.
        if summed_specs is None:
summed_specs = sum1
else:
summed_specs = np.vstack((summed_specs, sum1))
med_spec = np.zeros(len(summed_specs[0,:]))
for column in range(len(summed_specs[0,:])):
med_spec[column]= np.median(summed_specs[:,column])
# Creating a new fits file with a new header (to reflect the change to 1d)
# and our new data:
cubename = "ctfbrsn" + open(tellistfile, 'r').readline().strip()
oldhdr = fits.getheader(cubename, 1)
hdu = fits.PrimaryHDU()
hdr = hdu.header
hdr['BITPIX'] = oldhdr['BITPIX']
hdr['NAXIS'] = 1
hdr['NAXIS1'] = len(med_spec)
hdr['CTYPE'] = 'LINEAR'
hdr['CRVAL1'] = oldhdr['CRVAL3']
hdr['CRPIX1'] = oldhdr['CRPIX3']
hdr['CDELT1'] = oldhdr['CD3_3']
hdr['CD1_1'] = oldhdr['CD3_3']
try:
fits.writeto(outpref+cubename, med_spec, hdr)
print 'Writing new fits file', outpref+cubename
except IOError:
print 'Removing old telluric 1d-file:', outpref+cubename
os.remove(outpref+cubename)
print 'Writing new fits file', outpref+cubename
fits.writeto(outpref+cubename, med_spec, hdr)
###########################################################################
# Reset to user defaults #
###########################################################################
if user_clobber == "no":
iraf.set(clobber='no')
###########################################################################
# End of the Telluric Calibration Data Reduction #
# #
# The output of this reduction script is a 1-D spectrum used for #
# telluric calibration of NIFS science data. For this particular #
# reduction the output file name is "gxctfbrsn"+telluric, or: #
# gxctfbrsnN20100401S0138. The file prefixes are described below. #
# #
# g = gemcombined/gemarithed n=nfprepared s=skysubtracted #
# r=nsreduced b = bad pixel corrected f= run through nffitcoords #
# t = nftransformed x = extracted to a 1D spectrum m = flat 3d cube #
# #
# This script is meant to be a guideline as a method of a typical data #
# reduction for NIFS frames. Of course, NIFS PIs can add or skip steps #
# in this reduction as they deem fit in order to reduce their particular #
# datasets. #
# #
# version. 3.0: The output name of the 1d telluric file is now set in the #
# beginning of the pipeline. By default it is #
# "telluric_norm_weighed.fits" #
# #
###########################################################################
return outpref+cubename
###### STEP 3.2 (Functions for changing the stellar template) #######
def velocity_fix(fits_template, fits_data, vel_template = 'v_astartemplate.fits'):
'''The A star will have some velocity. Because the telluric absorption
will remain at the correct place regardless, we will have to shift the
spectrum from the template to simulate the template having the same velocity.
We find the velocity of the star by making a gaussian fit to both the data
and the template at the Brackett-gamma line, then measuring the difference.
'''
# We find the wavelength for the Brackett-gamma line of the template first:
dat = fits.getdata(fits_template)
header = fits.getheader(fits_template)
start = header['CRVAL1']
spec_delt = header['CD1_1']
no_bins = header['NAXIS1']
    # Making a linear fit from the areas surrounding the Brackett-gamma line,
    # as we have to correct for the downwards slope in the spectrum before
    # fitting a gaussian. We need the x-values corresponding to these areas -
    # here the continuum windows are 21050-21450 and 21870-22270 angstrom,
    # and the gaussian itself is fit over 21450-21870 angstrom, the region
    # containing the Brackett-gamma line. These wavelengths are
    # semi-arbitrary and can be changed at will.
linfitmin = int((21050 - start) / spec_delt)
xmin = int((21450 - start) / spec_delt)
xmax = int((21870 - start) / spec_delt)
linfitmax = int((22270 - start) / spec_delt)
xval = np.append(np.arange(linfitmin, xmin), np.arange(xmax, linfitmax))
lindata = np.append(dat[linfitmin:xmin], dat[xmax:linfitmax])
m, b = np.polyfit(xval, lindata, 1)
fitted_dat = dat/(m*np.arange(no_bins) + b)
    # Now that the linear trend has been divided out we can run the gaussian fitter:
gaussfit = test_fit(np.arange(xmin,xmax), fitted_dat[xmin:xmax])
x = gaussfit[2]
    wlength = start + x*spec_delt # Done! Found the Brackett-gamma wavelength for the template.
# Now the exact same procedure for the data:
dat1 = fits.getdata(fits_data)
dat1 = dat1/np.median(dat1)
header1 = fits.getheader(fits_data)
start1 = header1['CRVAL1']
spec_delt1 = header1['CD1_1']
no_bins1 = header1['NAXIS1']
# The wavelengths have been adjusted slightly to account for some telluric absorption
# lines that would have clouded the linear/gaussian fit.
linfitmin1 = int((21350 - start1) / spec_delt1)
xmin1 = int((21550 - start1) / spec_delt1)
xmax1 = int((21770 - start1) / spec_delt1)
linfitmax1 = int((21940 - start1) / spec_delt1)
xval1 = np.append(np.arange(linfitmin1, xmin1), np.arange(xmax1, linfitmax1))
lindata1 = np.append(dat1[linfitmin1:xmin1], dat1[xmax1:linfitmax1])
m1, b1 = np.polyfit(xval1, lindata1, 1)
fitted_dat1 = dat1/(m1*np.arange(no_bins1) + b1)
gaussfit1 = test_fit(np.arange(xmin1,xmax1), fitted_dat1[xmin1:xmax1])
x1 = gaussfit1[2]
    wlength1 = start1 + x1*spec_delt1 # Found the Brackett-gamma wavelength for the data as well.
print "Template emission wavelength is", wlength,\
", data emission wavelength is", wlength1
diff = wlength1 - wlength
velocity = diff * 2.99792458E8 / wlength
print "The velocity of the A star is", velocity, "m s^-1"
return velocity
def prepare_data(fits_template, fits_data, R = 5000):
'''Prepares the sigma for the convolution. Assumes all header info
is in units of angstrom.
'''
#Find delta lambda (i.e. sigma) from the data info and R
header_data = fits.getheader(fits_data)
len_spec = header_data['NAXIS1']
start = header_data['CRVAL1']
bin_size = header_data['CD1_1']
sigma_wv = (start + (len_spec*bin_size)/2)/R
#Now convert this into units of pixels of the star template spectrum
header_template = fits.getheader(fits_template)
bin_size_template = header_template['CD1_1']
sigma = sigma_wv / bin_size_template
return sigma
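# Worked example for prepare_data (numbers illustrative, not from real
# headers): a K-band spectrum starting at 20000 A with 2048 bins of 2.13 A
# has mid wavelength ~22180 A, so at R = 5000 the smoothing width lambda/R
# is ~4.4 A; with a 0.2 A template bin that is a sigma of ~22 template
# pixels. Strictly, lambda/R is the FWHM of a resolution element; for a true
# Gaussian sigma one would divide by 2*sqrt(2*ln(2)) ~ 2.355.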
def convolve(fits_template, sigma, smooth_template = 'c_astartemplate.fits'):
    '''Convolve the 1d data from a fits file with the sigma specified. Sigma must be
given in number of bins. This is done on the high resolution template spectrum so
that it's smoothed out for the interpolation done later.
'''
dat = fits.getdata(fits_template)
header = fits.getheader(fits_template)
len_spec = header['NAXIS1']
start = header['CRVAL1']
spec_delt = header['CD1_1']
# Use a gaussian of arbitrary amplitude - as long as the spectrum
# is normalized after the convolution the amplitude is irrelevant
con = np.convolve(gauss_function(np.arange(-100,101), \
0, 1/(sigma*np.sqrt(2*np.pi)), 0, sigma), dat)
con1 = con[100:-100]
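    # np.convolve uses the 'full' mode by default, returning
    # len(dat) + len(kernel) - 1 samples; trimming 100 samples from each end
    # of the 201-sample-kernel result re-aligns the output with the input and
    # restores its original length.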
# Creates a .fits file with the smoothed data.
hdulist = fits.open(fits_template)
hdu = hdulist[0]
hdu.data = con1/np.median(con1)
try:
hdulist.writeto(smooth_template)
except IOError:
os.remove(smooth_template)
hdulist.writeto(smooth_template)
def lin_interpol(fits_template, fits_data, velocity, fixed_template):
    '''Does a linear interpolation of the template data, correcting it for
    velocity and resampling it onto the wavelength grid of the data. Because
    the template has much higher resolution than the data, a simple linear
    interpolation does not lose significant information.
    '''
# To do the interpolation we need the x-axis values for the template and our
# A star data. I call these x-axis values hres_spec (for the template) and
# lres_spec (for our data).
    dat = fits.getdata(fits_template)
    header = fits.getheader(fits_template)
    len_spec = header['NAXIS1']
    start = header['CRVAL1']
    spec_delt = header['CD1_1']
    # Build the wavelength axis from the bin index so it always has exactly
    # len_spec elements (np.arange with a float step can occasionally return
    # one element too many).
    hres_spec = start + spec_delt*np.arange(len_spec)
    # Fixing the template spectrum for velocity (Doppler-shift every bin):
    hres_spec = hres_spec*(1.0 + velocity/2.99792458E8)
# Add _d at the end to signify that the variable is for the data and not
# for the template:
indx_d = 0
dat_d = fits.getdata(fits_data)
header_d = fits.getheader(fits_data)
len_spec_d = header_d['NAXIS1']
start_d = header_d['CRVAL1']
spec_delt_d = header_d['CD1_1']
lres_spec = np.linspace(start_d, start_d + (len_spec_d-1)*spec_delt_d, \
len_spec_d)
# Doing the interpolation. The new data will be the template data, but
# interpolated so that every data point corresponds to a wavelength value
# from our original lres_spec. An arbitrary data point would be
# (lres_spec[i], new_data[i]) for any i.
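    # Note: provided hres_spec is strictly increasing and spans the full
    # range of lres_spec, this whole loop should be equivalent to the single
    # call new_data = np.interp(lres_spec, hres_spec, dat); the explicit loop
    # is kept for transparency.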
new_data = np.zeros(len_spec_d)
indx = 0
    print 'Fitting stellar template to A star data format...'
for x in lres_spec:
for indxx1 in range(len(hres_spec)):
if hres_spec[indxx1] > x:
break
indxx0 = indxx1 - 1
x0 = hres_spec[indxx0]
x1 = hres_spec[indxx1]
y0 = dat[indxx0]
y1 = dat[indxx1]
y = y0 + ((y1 - y0)*(x - x0))/(x1 - x0)
new_data[indx] = y
indx += 1
print 'Stellar template data successfully modified.'
# Creating a new fits file and changing the headers to their new values.
hdulist = fits.open(fits_template)
hdu = hdulist[0]
hdu.header['NAXIS1'] = len(new_data)
hdu.header['CRVAL1'] = lres_spec[0]
hdu.header['CDELT1'] = lres_spec[1] - lres_spec[0]
hdu.header['CD1_1'] = lres_spec[1] - lres_spec[0]
hdu.data = new_data
try:
hdulist.writeto(fixed_template)
except IOError:
os.remove(fixed_template)
hdulist.writeto(fixed_template)
return new_data
def telluric_fits(fin_template, fits_data, telluric_norm = 'telluric_norm1.fits'):
'''Creates the fits file of the final normalized telluric spectrum.
'''
# Because the finished template data should have the same wavelength values
# and indices as the data, creating the actual telluric file is very simple.
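    # In effect: telluric(lambda) = observed(lambda)/template(lambda),
    # median-normalized so the continuum sits near 1.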
hdulist = fits.open(fits_data)
hdu = hdulist[0]
data = hdu.data/(fin_template)
hdu.data = data/(np.median(data))
try:
hdulist.writeto(telluric_norm)
except IOError:
os.remove(telluric_norm)
hdulist.writeto(telluric_norm)
print "backing up telluric norm:", telluric_norm
shutil.copyfile(telluric_norm,reduce_dir+telluric_norm)
shutil.copyfile(telluric_norm,backupDir+telluric_norm)
def run_telluric(raw_data, red_data, ManualNames, tellistfile, astarskylistfile,\
fits_template, R, telluric_norm):
'''Modifies the template, then creates the telluric spectrum.
'''
fits_data = astar_spec(raw_data, red_data, ManualNames, tellistfile, astarskylistfile)
smooth_template = 'c_astartemplate.fits' # name after template is smoothed
fixed_template = 'v' + smooth_template # template is velocity fixed and in right format
# The A star will have some velocity - this function finds it and stores it for
# interpolation later.
velocity = velocity_fix(fits_template, fits_data)
    # Find the sigma (in template pixels) for the convolution and perform it,
    # creating a fits file with the name specified in smooth_template above.
sigma = prepare_data(fits_template, fits_data, R)
print "The sigma for the convolution (in pixels of high-res spectrum) is", sigma
convolve(fits_template, sigma, smooth_template)
    # Linearly interpolate the template onto the wavelength grid of the
    # A star data, also correcting for the measured velocity.
fin_template = lin_interpol(smooth_template, fits_data, velocity, fixed_template)
# Finally creates the .fits file using the spectrum from the A star
telluric_fits(fin_template, fits_data, telluric_norm = telluric_norm)
######################################################################
######################################################################
######################################################################
# STEP 4:
# REDUCTION OF SCIENCE DATA
#
# This is the final step that the others have all been preparing for.
# Having the final calibration files (calflat, arc, ronchiflat, shiftimage,
# bpm and telluric_norm), this portion of the script creates the final 3D data
# cubes.
#
######################################################################
######################################################################
######################################################################
def run_science(raw_data, red_data, rmReducedData, calflat, arc, ronchiflat, \
shiftimage, bpm, scilistfile, skylistfile, useSkyFile, telluric, \
ManualNames, reduced_all):
'''
Gemini NIFS data reduction script
Reduction for: SCIENCE DATA
- Merging of data cubes is not implemented here.
'''
###############################################################
# STEP 4.1: PREPARE IRAF #
###############################################################
# Import some useful python utilities
import sys
import getopt
import os
import time
import shutil
# Import the pyraf module and relevant packages
from pyraf import iraf
iraf.gemini()
iraf.nifs()
iraf.gnirs()
iraf.gemtools()
import numpy as np
import glob
# Unlearn the used tasks
iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs)
# Create a log file and back up the previous one if it already exists
log = 'Science.log'
if os.path.exists(log):
t = time.localtime()
app = "_"+str(t[0])+str(t[1]).zfill(2)+str(t[2]).zfill(2)+'_'+ \
str(t[3]).zfill(2)+':'+str(t[4]).zfill(2)+':'+str(t[5]).zfill(2)
shutil.move(log,log+app)
iraf.set(stdimage='imt2048')
# Prepare the package for NIFS
iraf.nsheaders("nifs",logfile=log)
###############################################################
# STEP 4.2: SET REDUCTION FILE NAMES AND PATHS #
###############################################################
# Set clobber to 'yes' for the script. This still does not make the gemini tasks
# overwrite files, so you will likely have to remove files if you re-run the script.
user_clobber=iraf.envget("clobber")
iraf.reset(clobber='yes')
    if not ManualNames:
calib_names = open(red_data+'calibfiles','r')
ronchiflat = calib_names.readline().split()[2].strip() #'rgn'+"N20..."+'.fits'
bpm = calib_names.readline().split()[2].strip() #'flat_bpm.pl'
calflat = calib_names.readline().split()[2].strip() #'flat'
shiftimage = calib_names.readline().split()[2].strip() #'shiftFile'
arc = calib_names.readline().split()[2].strip() #'wrgn'+"N20..."+'.fits'
if rmReducedData:
# remove the data
print 'Removing previously reduced files'
sciFiles = np.loadtxt(scilistfile,dtype='str')
for k in np.arange(len(sciFiles)):
existingFiles = glob.glob('*?'+sciFiles[k])
for inFile in existingFiles:
print 'REMOVING: '+inFile
os.remove(inFile)
###########################################################################
# STEP 4.3: Reduce the Science Data #
###########################################################################
iraf.nfprepare("@"+scilistfile, rawpath=raw_data,
shiftimage=red_data+shiftimage,fl_vardq='yes',
bpm=red_data+bpm,
logfile=log)
    if useSkyFile is None:
iraf.nfprepare("@"+skylistfile, rawpath=raw_data,
shiftimage=red_data+shiftimage,fl_vardq='yes',
bpm=red_data+bpm,
logfile=log)
nfiles = len(open(skylistfile).readlines())
        skyoutfile = open(skylistfile, "r").readline().strip()
if nfiles > 1:
iraf.gemcombine("n//@"+skylistfile,output="gn"+os.path.splitext(skyoutfile)[0],
fl_dqpr='yes',fl_vardq='yes',masktype="none",logfile=log)
useSkyFile = "gn"+skyoutfile
else:
useSkyFile = "n"+skyoutfile
#############################################################
# DATA REDUCTION HINT - #
# #
# At the present time, we found that there are problems #
# with the WCS coordinates. The automatic sky #
# frame ID and subtraction does not work very well in #
# "nsreduce" for NIFS data reductions if the number of sky #
    # frames does not equal the number of science frames. As #
# a result, the sky subtraction in this script was set up #
# to work outside of the "nsreduce" call. This should work #
# for most modes of science acquisition. However you do #
# have to ensure that each frame in 'scilist' has a #
# corresponding frame in the "skylist" file. If you share #
    # sky frames between different science exposures, you will  #
# have to duplicate those in the skylist. #
#############################################################
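    # For example, if science frames S1 and S2 were to share one sky frame
    # K1, the skylist would repeat it (S1->K1, S2->K1) so the two lists pair
    # up line by line. (Frame names here are purely illustrative.)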
# Read in the frame lists (removing '\n' line breaks from the strings)
scilist=open(scilistfile, "r").readlines()
scilist=[word.strip() for word in scilist]
for i in range(len(scilist)):
iraf.gemarith ("n"+scilist[i], "-", useSkyFile, "gn"+scilist[i], fl_vardq="yes",
logfile=log)
# Flat field and cut the data
iraf.nsreduce("gn@"+scilistfile, fl_cut='yes', fl_nsappw='yes', fl_dark='no', fl_sky='no',
fl_flat='yes', flatimage=red_data+calflat,
fl_vardq='yes',logfile=log)
# Interpolate over bad pixels flagged in the DQ plane
iraf.nffixbad("rgn@"+scilistfile,logfile=log)
# Derive the 2D to 3D spatial/spectral transformation
iraf.nsfitcoords("brgn@"+scilistfile,lamptransf=arc,
sdisttransf=ronchiflat,logfile=log, fl_int='no', lxorder=4, syorder=4)
# Apply the transformation determined in the nffitcoords step
iraf.nstransform("fbrgn@"+scilistfile, logfile=log)
#iraf.nftelluric("tfbrgn@"+scilistfile, telluric, logfile=log)
# Reformat the data into a 3-D datacube
iraf.nifcube ("tfbrgn@"+scilistfile, logfile=log)
# correct the data for telluric absorption features
telluric_data = fits.getdata(telluric)
cubes = open(scilistfile, 'r').readlines()
if not(os.path.isdir(reduced_all)):
print "Directory not found, making directory:"+reduced_all
os.mkdir(reduced_all)
# Copy the reduced files
for inx in range(len(cubes)):
print 'backing up science files: '+"ctfbrgn"+cubes[inx].strip()
shutil.copyfile("ctfbrgn"+cubes[inx].strip(), reduced_all + \
"ctfbrgn" + cubes[inx].strip())
print "The telluric file is", telluric
available = telluric_data > 0.1
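    # 'available' selects the wavelength channels where the telluric
    # transmission exceeds 0.1; channels at or below that level are left
    # uncorrected rather than divided by a near-zero, noise-amplifying value.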
for file1 in cubes:
file1 = "ctfbrgn" + file1.strip()
hdulist = fits.open(file1)
hdu = hdulist[1]
cube_data = hdu.data
print "Removing telluric absorption from", file1
for j in range(len(cube_data[0,:,0])):
for i in range(len(cube_data[0,0,:])):
cube_data[available,j,i] = cube_data[available,j,i]/telluric_data[available]
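        # (The two spatial loops above could likely be replaced by a single
        # broadcasted division, e.g.
        #   cube_data[available, :, :] /= telluric_data[available, None, None]
        # which should give the same result much faster.)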
hdu.data = cube_data
try:
hdulist.writeto(reduced_all+'a'+file1)
except IOError:
os.remove(reduced_all+'a'+file1)
hdulist.writeto(reduced_all+'a'+file1)
###########################################################################
# Reset to user defaults #
###########################################################################
if user_clobber == "no":
iraf.set(clobber='no')
###########################################################################
# End of the Science Data Reduction #
# #
# The output of this reduction is a set of 3-D data cubes that have been #
# sky subtracted, flat fielded, cleaned for bad pixels, telluric #
# corrected and rectified into a cohesive datacube format. In the case #
# of this reduction, the final output files are called: actfbrgn+science, #
# or: actfbrgnN20100401S0182.fits #
# actfbrgnN20100401S0184.fits #
# actfbrgnN20100401S0186.fits #
# actfbrgnN20100401S0188.fits #
# #
# The meaning of the output prefixes are described below: #
# #
# g = gemcombined n=nfprepared s=skysubtracted r=nsreduced #
# b = bad pixel corrected f= run through nffitcoords #
# t = nftransformed a = corrected for telluric absorption features #
# c = rectified to a 3D datacube #
# #
# This script is meant to be a guideline as a method of a typical data #
# reduction for NIFS frames. Of course, NIFS PIs can add or skip steps #
# in this reduction as they deem fit in order to reduce their particular #
# datasets. #
# #
###########################################################################
def run_reduction():
    if steps[0]:
run_basecalib(datDir, raw_dir, reduce_dir, backupDir, arcDir, useSkyLines, \
flatlist, flatdarklist, ronchilist, arcStr, arcStrDrk, steps_basec)
    if steps[1]:
run_telluric(raw_data, red_data, ManualNames, tellistfile, \
astarskylistfile, fits_template, R, telluric_norm)
    if steps[2]:
run_science(raw_data, red_data, rmReducedData, calflat, arc, ronchiflat, \
shiftimage, bpm, scilistfile, skylistfile, useSkyFile, telluric, \
ManualNames, reduced_all)
|
followthesheep/nifs_reduction_example
|
NIFS_Reduction_fin.py
|
Python
|
mit
| 65,850
|
[
"Gaussian"
] |
656a7348e729adc992f38daf6252f75456bc10a507719bd1651a73ec9604d4f8
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for Video.
"""
import os
from ddt import ddt, unpack, data
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipIf, skip
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from common.test.acceptance.tests.helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.tests.helpers import skip_if_browser
from flaky import flaky
VIDEO_SOURCE_PORT = 8777
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super(VideoBaseTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.course_nav = CourseNavPage(self.browser)
self.courseware = CoursewarePage(self.browser, self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.contents_of_verticals = None
self.youtube_configuration = {}
self.user_info = {}
# reset youtube stub server
self.addCleanup(YouTubeStubConfig.reset)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
Prepare the course and get to the video unit
        however do not wait for it to render, because
        there has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
YouTubeStubConfig.configure(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_contents_of_verticals = self.contents_of_verticals
# Video tests require at least one vertical with a single video.
if not _contents_of_verticals:
_contents_of_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_contents_of_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical_contents, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical_contents: a list of items for the vertical to contain
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))
for video in vertical_contents:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
self.auth_page.visit()
self.user_info = self.auth_page.user_info
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
if player_mode == 'html5':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if additional_data:
metadata.update(additional_data)
return metadata
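    # Example: metadata_for_mode('html5', {'sub': '3_yD_cEKoCk'}) blanks out
    # every youtube_id_* speed entry, points html5_sources at the local stub
    # server, and carries the extra 'sub' key through unchanged.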
def go_to_sequential_position(self, position):
"""
        Navigate to the sequential specified by `position`
"""
self.courseware.go_to_sequential_position(position)
self.video.wait_for_video_player_render()
@attr(shard=4)
@ddt
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
def setUp(self):
super(YouTubeVideoTest, self).setUp()
def test_youtube_video_rendering_wo_html5_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
"""
self.navigate_to_video()
# Verify that video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_transcript_button_wo_english_transcript(self):
"""
Scenario: Transcript button works correctly w/o english transcript in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "好 各位同学" text in the transcript
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_cc_button(self):
"""
Scenario: CC button works correctly with transcript in YouTube mode
Given the course has a video component in "Youtube" mode
And I have defined a transcript for the video
Then I see the closed captioning element over the video
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
# Show captions and make sure they're visible and cookie is set
self.video.show_closed_captions()
self.video.wait_for_closed_captions()
self.assertTrue(self.video.is_closed_captions_visible)
self.video.reload_page()
self.assertTrue(self.video.is_closed_captions_visible)
# Hide captions and make sure they're hidden and cookie is unset
self.video.hide_closed_captions()
self.video.wait_for_closed_captions_to_be_hidden()
self.video.reload_page()
self.video.wait_for_closed_captions_to_be_hidden()
def test_transcript_button_transcripts_and_sub_fields_empty(self):
"""
Scenario: Transcript button works correctly if transcripts and sub fields are empty,
but transcript file exists in assets (Youtube mode of Video component)
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
Then I see the correct english text in the captions
"""
self._install_course_fixture()
self.course_fixture.add_asset(['subs_3_yD_cEKoCk.srt.sjson'])
self.course_fixture._upload_assets()
self._navigate_to_courseware_video_and_render()
self.video.show_captions()
# Verify that we see "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
def test_transcript_button_hidden_no_translations(self):
"""
Scenario: Transcript button is hidden if no translations
Given the course has a Video component in "Youtube" mode
Then the "Transcript" button is hidden
"""
self.navigate_to_video()
self.assertFalse(self.video.is_button_shown('transcript_button'))
def test_fullscreen_video_alignment_with_transcript_hidden(self):
"""
Scenario: Video is aligned with transcript hidden in fullscreen mode
Given the course has a Video component in "Youtube" mode
When I view the video at fullscreen
Then the video with the transcript hidden is aligned correctly
"""
self.navigate_to_video()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in YouTube mode
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I can download the transcript in "srt" format
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_fullscreen_video_alignment_on_transcript_toggle(self):
"""
Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
And the video with the transcript hidden is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
# click video button "transcript"
self.video.click_player_button('transcript_button')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_video_rendering_with_default_response_time(self):
"""
Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
        Given the YouTube server response time is less than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "Youtube" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 0.4
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_video_rendering_wo_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 2.0
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_video_with_youtube_blocked_with_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_video_with_youtube_blocked_delayed_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube server response time is greater than 1.5 seconds
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_html5_video_rendered_with_youtube_captions(self):
"""
        Scenario: User should see Youtube captions if there are no transcripts
        available for HTML5 mode
Given that I have uploaded a .srt.sjson file to assets for Youtube mode
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
And Video component rendered in HTML5 mode
And Html5 mode video has no transcripts
When I see the captions for HTML5 mode video
Then I should see the Youtube captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube_html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# check if caption button is visible
self.assertTrue(self.video.is_button_shown('transcript_button'))
self._verify_caption_text('Welcome to edX.')
@data(('srt', '00:00:00,260'), ('txt', 'Welcome to edX.'))
@unpack
def test_download_transcript_links_work_correctly(self, file_type, search_text):
"""
Scenario: Download 'srt' transcript link works correctly.
Download 'txt' transcript link works correctly.
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': '3_yD_cEKoCk', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.contents_of_verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open vertical containing video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,260"
self.assertTrue(self.video.downloaded_transcript_contains_text(file_type, search_text))
# open vertical containing video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_present('download_transcript'))
def _verify_caption_text(self, text):
self.video._wait_for(
lambda: (text in self.video.captions_text),
u'Captions contain "{}" text'.format(text),
timeout=5
)
def _verify_closed_caption_text(self, text):
"""
Scenario: returns True if the captions are visible, False is else
"""
self.video.wait_for(
lambda: (text in self.video.closed_captions_text),
u'Closed captions contain "{}" text'.format(text),
timeout=5
)
def test_video_language_menu_working(self):
"""
Scenario: Language menu works correctly in Video component
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "zh"
Then I see "好 各位同学" text in the captions
And I select language with code "en"
Then I see "Welcome to edX." text in the captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.hide_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
self.video.select_language('zh')
unicode_text = "好 各位同学".decode('utf-8')
self._verify_caption_text(unicode_text)
self.video.select_language('en')
self._verify_caption_text('Welcome to edX.')
def test_video_language_menu_working_closed_captions(self):
"""
Scenario: Language menu works correctly in Video component, checks closed captions
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "en"
Then I see "Welcome to edX." text in the closed captions
And I select language with code "zh"
Then I see "我们今天要讲的题目是" text in the closed captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_closed_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
# we start the video, then pause it to activate the transcript
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.select_language('en')
self.video.click_first_line_in_transcript()
self._verify_closed_caption_text('Welcome to edX.')
self.video.select_language('zh')
unicode_text = "我们今天要讲的题目是".decode('utf-8')
self.video.click_first_line_in_transcript()
self._verify_closed_caption_text(unicode_text)
def test_multiple_videos_in_sequentials_load_and_work(self):
"""
Scenario: Multiple videos in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" mode in position "1" of sequential
And videos "C,D" in "Youtube" mode in position "2" of sequential
"""
self.contents_of_verticals = [
[{'display_name': 'A'}, {'display_name': 'B'}],
[{'display_name': 'C'}, {'display_name': 'D'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C', 'D']
def execute_video_steps(video_names):
"""
Execute video steps
"""
for video_name in video_names:
self.video.use_video(video_name)
self.video.click_player_button('play')
self.assertIn(self.video.state, ['playing', 'buffering'])
self.video.click_player_button('pause')
# go to video
self.navigate_to_video()
execute_video_steps(tab1_video_names)
# go to second sequential position
self.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # we are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
def test_video_component_stores_speed_correctly_for_multiple_videos(self):
"""
Scenario: Video component stores speed correctly when each video is in separate sequential
Given I have a video "A" in "Youtube" mode in position "1" of sequential
And a video "B" in "Youtube" mode in position "2" of sequential
And a video "C" in "HTML5" mode in position "3" of sequential
"""
# vertical titles are created in VideoBaseTest._create_single_vertical
# and are of the form Test Vertical-{_} where _ is the index in self.contents_of_verticals
self.contents_of_verticals = [
[{'display_name': 'A'}], [{'display_name': 'B'}],
[{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
]
self.navigate_to_video()
# select the "2.0" speed on video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
self.video.wait_for_video_player_render()
self.video.speed = '2.0'
# select the "0.50" speed on video "B"
self.course_nav.go_to_vertical('Test Vertical-1')
self.video.wait_for_video_player_render()
self.video.speed = '0.50'
# open video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
self.video.wait_for_video_player_render()
# Since the playback speed was set to .5 in "B", this video will also be impacted
# because a playback speed has never explicitly been set for it. However, this video
# does not have a .5 playback option, so the closest possible (.75) should be selected.
self.video.verify_speed_changed('0.75x')
# go to the vertical containing video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
# Video "A" should still play at speed 2.0 because it was explicitly set to that.
self.assertEqual(self.video.speed, '2.0x')
# reload the page
self.video.reload_page()
# go to the vertical containing video "A"
self.course_nav.go_to_vertical('Test Vertical-0')
# check if video "A" should start playing at speed "2.0"
self.assertEqual(self.video.speed, '2.0x')
# select the "1.0" speed on video "A"
self.video.speed = '1.0'
# go to the vertical containing "B"
self.course_nav.go_to_vertical('Test Vertical-1')
# Video "B" should still play at speed .5 because it was explicitly set to that.
self.assertEqual(self.video.speed, '0.50x')
# go to the vertical containing video "C"
self.course_nav.go_to_vertical('Test Vertical-2')
# The change of speed for Video "A" should impact Video "C" because it still has
# not been explicitly set to a speed.
self.video.verify_speed_changed('1.0x')
def test_video_has_correct_transcript(self):
"""
Scenario: Youtube video has correct transcript if fields for other speeds are filled
Given it has a video in "Youtube" mode
And I have uploaded multiple transcripts
And I make sure captions are opened
Then I see "Welcome to edX." text in the captions
And I select the "1.50" speed
And I reload the page with video
Then I see "Welcome to edX." text in the captions
And I see duration "1:56"
"""
self.assets.extend(['subs_3_yD_cEKoCk.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
data = {'sub': '3_yD_cEKoCk', 'youtube_id_1_5': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
self.assertIn('Welcome to edX.', self.video.captions_text)
self.video.speed = '1.50'
self.video.reload_page()
self.assertIn('Welcome to edX.', self.video.captions_text)
        self.assertEqual(self.video.duration, '1:56')
def test_video_position_stored_correctly_wo_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I wait until video reaches position "0:03"
        And I click video button "pause"
        And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then the video slider should be equal to or greater than "0:03"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 3)
@skip("Intermittently fails 03 June 2014")
def test_video_position_stored_correctly_with_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
        And I click video button "play"
        And I click video button "pause"
        Then I seek video to "0:10" position
        And I click video button "play"
        And I click video button "pause"
        And I reload the page with video
        Then the video slider should be equal to or greater than "0:10"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.seek('0:10')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 10)
def test_simplified_and_traditional_chinese_transcripts(self):
"""
Scenario: Simplified and Traditional Chinese transcripts work as expected in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a Simplified Chinese transcript for the video
And I have defined a Traditional Chinese transcript for the video
Then I see the correct subtitle language options in cc menu
Then I see the correct text in the captions for Simplified and Traditional Chinese transcripts
And I can download the transcripts for Simplified and Traditional Chinese
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
data = {
'download_track': True,
'transcripts': {'zh_HANS': 'simplified_chinese.srt', 'zh_HANT': 'traditional_chinese.srt'}
}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.extend(['simplified_chinese.srt', 'traditional_chinese.srt'])
self.navigate_to_video()
langs = {'zh_HANS': '在线学习是革', 'zh_HANT': '在線學習是革'}
for lang_code, text in langs.items():
self.assertTrue(self.video.select_language(lang_code))
unicode_text = text.decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
def test_video_bumper_render(self):
"""
Scenario: Multiple videos with bumper in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" and "HTML5" modes in position "1" of sequential
And video "C" in "Youtube" mode in position "2" of sequential
When I open sequential position "1"
Then I see video "B" has a poster
When I click on it
Then I see video bumper is playing
When I skip the bumper
Then I see the main video
When I click on video "A"
Then the main video starts playing
When I open sequential position "2"
And click on the poster
Then the main video starts playing
Then I see that the main video starts playing once I go back to position "2" of sequential
When I reload the page
Then I see that the main video starts playing when I click on the poster
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.contents_of_verticals = [
[{'display_name': 'A'}, {'display_name': 'B', 'metadata': self.metadata_for_mode('html5')}],
[{'display_name': 'C'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C']
def execute_video_steps(video_names):
"""
Execute video steps
"""
for video_name in video_names:
self.video.use_video(video_name)
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_player_render(autoplay=True)
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.course_fixture.add_advanced_settings(additional_data)
self.navigate_to_video_no_render()
self.video.use_video('B')
self.assertTrue(self.video.is_poster_shown)
self.video.click_on_poster()
self.video.wait_for_video_bumper_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.click_player_button('skip_bumper')
# no autoplay here, maybe video is too small, so pause is not switched
self.video.wait_for_video_player_render()
self.assertIn(self.video.state, ['playing', 'buffering', 'finished'])
self.video.use_video('A')
execute_video_steps(['A'])
# go to second sequential position
self.courseware.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
        # we are again playing tab 1 videos to ensure that switching didn't break any video functionality.
self.courseware.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
self.video.browser.refresh()
execute_video_steps(tab1_video_names)
@attr(shard=4)
class YouTubeHtml5VideoTest(VideoBaseTest):
""" Test YouTube HTML5 Video Player """
def setUp(self):
super(YouTubeHtml5VideoTest, self).setUp()
@flaky # TODO fix this, see TNL-1642
def test_youtube_video_rendering_with_unsupported_sources(self):
"""
        Scenario: Video component is rendered in the LMS in Youtube mode
        with HTML5 sources that aren't supported by the browser
        Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
Then the video has rendered in "Youtube" mode
"""
self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
self.navigate_to_video()
# Verify that the video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
@attr(shard=4)
class Html5VideoTest(VideoBaseTest):
""" Test HTML5 Video Player """
def setUp(self):
super(Html5VideoTest, self).setUp()
def test_autoplay_disabled_for_video_component(self):
"""
Scenario: Autoplay is disabled by default for a Video component
Given the course has a Video component in "HTML5" mode
When I view the Video component
Then it does not have autoplay enabled
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
# Verify that the video has autoplay mode disabled
self.assertFalse(self.video.is_autoplay_enabled)
def test_html5_video_rendering_with_unsupported_sources(self):
"""
Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
Given the course has a Video component in "HTML5_Unsupported_Video" mode
When I view the Video component
        Then an error message is shown
And the error message has the correct text
"""
self.metadata = self.metadata_for_mode('html5_unsupported_video')
self.navigate_to_video_no_render()
# Verify that error message is shown
self.assertTrue(self.video.is_error_message_shown)
# Verify that error message has correct text
correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text)
# Verify that spinner is not shown
self.assertFalse(self.video.is_spinner_shown)
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# Then I can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_full_screen_video_alignment_with_transcript_visible(self):
"""
Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
Given the course has a Video component in "HTML5" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I show the captions
And I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
def test_cc_button_with_english_transcript(self):
"""
Scenario: CC button works correctly with only english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined english subtitles for the video
And I have uploaded an english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "Welcome to edX." text in the captions
self.assertIn("Welcome to edX.", self.video.captions_text)
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('chinese_transcripts.srt')
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_video_rendering(self):
"""
Scenario: Video component is fully rendered in the LMS in HTML5 mode
Given the course has a Video component in "HTML5" mode
Then the video has rendered in "HTML5" mode
And video sources are correct
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources]))
@attr(shard=4)
class YouTubeQualityTest(VideoBaseTest):
""" Test YouTube Video Quality Button """
def setUp(self):
super(YouTubeQualityTest, self).setUp()
@skip_if_browser('firefox')
def test_quality_button_visibility(self):
"""
Scenario: Quality button appears on play.
Given the course has a Video component in "Youtube" mode
Then I see video button "quality" is hidden
And I click video button "play"
Then I see video button "quality" is visible
"""
self.navigate_to_video()
self.assertFalse(self.video.is_quality_button_visible)
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
@skip_if_browser('firefox')
def test_quality_button_works_correctly(self):
"""
Scenario: Quality button works correctly.
Given the course has a Video component in "Youtube" mode
And I click video button "play"
And I see video button "quality" is inactive
And I click video button "quality"
Then I see video button "quality" is active
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
self.assertFalse(self.video.is_quality_button_active)
self.video.click_player_button('quality')
self.video.wait_for(lambda: self.video.is_quality_button_active, 'waiting for quality button activation')
@attr(shard=4)
class DragAndDropTest(VideoBaseTest):
"""
Tests draggability of closed captions within videos.
"""
def setUp(self):
super(DragAndDropTest, self).setUp()
def test_if_captions_are_draggable(self):
"""
Loads transcripts so that closed-captioning is available.
Ensures they are draggable by checking start and dropped location.
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.video.show_closed_captions()
self.video.wait_for_closed_captions()
self.assertTrue(self.video.is_closed_captions_visible)
action = ActionChains(self.browser)
captions = self.browser.find_element(By.CLASS_NAME, 'closed-captions')
captions_start = captions.location
action.drag_and_drop_by_offset(captions, 0, -15).perform()
captions_end = captions.location
        # We have to branch here due to unexpected behaviour in Chrome:
        # Chrome sets the y offset of the element to 834 instead of 650.
if self.browser.name == 'chrome':
self.assertEqual(
captions_end.get('y') - 168,
captions_start.get('y'),
'Closed captions did not get dragged.'
)
else:
self.assertEqual(
captions_end.get('y') + 16,
captions_start.get('y'),
'Closed captions did not get dragged.'
)
@attr('a11y')
class LMSVideoModuleA11yTest(VideoBaseTest):
"""
LMS Video Accessibility Test Class
"""
def setUp(self):
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# the a11y tests run in CI under phantomjs which doesn't
# support html5 video or flash player, so the video tests
# don't work in it. We still want to be able to run these
# tests in CI, so override the browser setting if it is
# phantomjs.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(LMSVideoModuleA11yTest, self).setUp()
def test_video_player_a11y(self):
        # load transcripts so that we can test skipping to them
self.assets.extend(['english_single_transcript.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"en": "english_single_transcript.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
# limit the scope of the audit to the video player only.
self.video.a11y_audit.config.set_scope(
include=["div.video"]
)
self.video.a11y_audit.check_for_accessibility_errors()
|
itsjeyd/edx-platform
|
common/test/acceptance/tests/video/test_video_module.py
|
Python
|
agpl-3.0
| 51,370
|
[
"VisIt"
] |
88130f0ad85ea823e2b9bc8be0562b76c11ad92c3f0476c4bdfdfe77b7c2927e
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`mediashoutimport` module provides the functionality for importing
a MediaShout database into the OpenLP database.
"""
import pyodbc
from openlp.core.lib import translate
from openlp.plugins.songs.lib.songimport import SongImport
VERSE_TAGS = ['V', 'C', 'B', 'O', 'P', 'I', 'E']
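# Tag letters for verse types; Verses.Type indexes into this list, and 'O'
# doubles as the fallback tag for out-of-range types (see processSong below).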
class MediaShoutImport(SongImport):
"""
The :class:`MediaShoutImport` class provides the ability to import the
MediaShout Access Database
"""
def __init__(self, manager, **kwargs):
"""
Initialise the MediaShout importer.
"""
SongImport.__init__(self, manager, **kwargs)
def doImport(self):
"""
Receive a single file to import.
"""
try:
conn = pyodbc.connect('DRIVER={Microsoft Access Driver (*.mdb)};'
'DBQ=%s;PWD=6NOZ4eHK7k' % self.import_source)
        except Exception:
            # Unfortunately pyodbc raises no specific exception type here
self.logError(self.import_source,
translate('SongsPlugin.MediaShoutImport', 'Unable to open the MediaShout database.'))
return
cursor = conn.cursor()
cursor.execute('SELECT Record, Title, Author, Copyright, '
'SongID, CCLI, Notes FROM Songs ORDER BY Title')
songs = cursor.fetchall()
self.import_wizard.progress_bar.setMaximum(len(songs))
for song in songs:
if self.stop_import_flag:
break
            # Use parameterised queries rather than string interpolation
            cursor.execute('SELECT Type, Number, Text FROM Verses '
                'WHERE Record = ? ORDER BY Type, Number', song.Record)
            verses = cursor.fetchall()
            cursor.execute('SELECT Type, Number, POrder FROM PlayOrder '
                'WHERE Record = ? ORDER BY POrder', song.Record)
            verse_order = cursor.fetchall()
            cursor.execute('SELECT Name FROM Themes INNER JOIN SongThemes '
                'ON SongThemes.ThemeId = Themes.ThemeId '
                'WHERE SongThemes.Record = ?', song.Record)
            topics = cursor.fetchall()
            cursor.execute('SELECT Name FROM Groups INNER JOIN SongGroups '
                'ON SongGroups.GroupId = Groups.GroupId '
                'WHERE SongGroups.Record = ?', song.Record)
topics += cursor.fetchall()
self.processSong(song, verses, verse_order, topics)
def processSong(self, song, verses, verse_order, topics):
"""
Create the song, i.e. title, verse etc.
"""
self.setDefaults()
self.title = song.Title
self.parse_author(song.Author)
self.addCopyright(song.Copyright)
self.comments = song.Notes
for topic in topics:
self.topics.append(topic.Name)
if '-' in song.SongID:
self.songBookName, self.songNumber = song.SongID.split('-', 1)
else:
self.songBookName = song.SongID
for verse in verses:
tag = VERSE_TAGS[verse.Type] + str(verse.Number) if verse.Type < len(VERSE_TAGS) else 'O'
self.addVerse(verse.Text, tag)
for order in verse_order:
if order.Type < len(VERSE_TAGS):
self.verseOrderList.append(VERSE_TAGS[order.Type] + str(order.Number))
self.finish()
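# A minimal usage sketch (hypothetical wiring; in OpenLP the import wizard
# normally constructs the importer and supplies the source file itself):
#     importer = MediaShoutImport(manager, filename='songs.mdb')
#     importer.doImport()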
|
marmyshev/item_title
|
openlp/plugins/songs/lib/mediashoutimport.py
|
Python
|
gpl-2.0
| 5,360
|
[
"Brian"
] |
03e691b762db3714fbf8017b2a5923143f87c7df127f81ba612f16f396d84b83
|
#WCS response decoder.
#Decodes response from a WCS (either a Coverages XML document or a Multipart MIME) and extracts the urls of the coverage data.
#Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#Author: Dominic Lowe, STFC
#contact email: d.lowe@rl.ac.uk
#
# Multipart MIME decoding based on http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/86676
#example: used in conjunction with ows lib wcs:
#from owslib import wcsdecoder
#u=wcs.getcoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='application/netcdf', store='true')
#decoder=wcsdecoder.WCSDecoder(u)
#decoder.getCoverages()
from __future__ import (absolute_import, division, print_function)
import os
from owscapable.etree import etree
import email
import errno
import mimetypes
class WCSDecoder(object):
def __init__(self, u):
''' initiate with a urllib url object.'''
self.u=u
self._getType()
def _getType(self):
''' determine whether it is a Multipart Mime or a Coverages XML file'''
#what's the best way to test this?
#for now read start of file
tempu=self.u
if tempu.readline()[:14] == '<?xml version=':
self.urlType='XML'
else:
self.urlType='Multipart'
def getCoverages(self, unpackdir='./unpacked'):
if self.urlType=='XML':
paths=[]
u_xml = self.u.read()
u_tree = etree.fromstring(u_xml)
for ref in u_tree.findall('{http://www.opengis.net/wcs/1.1}Coverage/{http://www.opengis.net/wcs/1.1}Reference'):
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
            for ref in u_tree.findall('{http://www.opengis.net/wcs/1.1.0/owcs}Coverage/{http://www.opengis.net/wcs/1.1.0/owcs}Reference'):
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
elif self.urlType=='Multipart':
#Decode multipart mime and return fileobjects
u_mpart=self.u.read()
mpart =MpartMime(u_mpart)
paths= mpart.unpackToDir(unpackdir)
return paths
class MpartMime(object):
def __init__ (self,mpartmime):
""" mpartmime is a multipart mime file that has already been read in."""
self.mpartmime=mpartmime
def unpackToDir(self, unpackdir):
""" unpacks contents of Multipart mime to a given directory"""
names=[]
#create the directory if it doesn't exist:
try:
os.mkdir(unpackdir)
except OSError as e:
# Ignore directory exists error
if e.errno != errno.EEXIST:
raise
#now walk through the multipart mime and write out files
msg = email.message_from_string(self.mpartmime)
counter =1
for part in msg.walk():
# multipart/* are just containers, ignore
if part.get_content_maintype() == 'multipart':
continue
# Applications should really check the given filename so that an
# email message can't be used to overwrite important files
filename = part.get_filename()
if not filename:
                try:
                    ext = mimetypes.guess_extension(part.get_content_type())
                except Exception:
                    ext = None
if not ext:
# Use a generic extension
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
fullpath=os.path.join(unpackdir, filename)
names.append(fullpath)
fp = open(fullpath, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return names
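# A minimal usage sketch (hypothetical urllib response object):
#     from urllib import urlopen
#     decoder = WCSDecoder(urlopen(coverage_request_url))
#     paths = decoder.getCoverages(unpackdir='./unpacked')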
|
b-cube/OwsCapable
|
owscapable/coverage/wcsdecoder.py
|
Python
|
bsd-3-clause
| 3,913
|
[
"NetCDF"
] |
2a4d48169e1a927df482a9fe2f225e45b5cf87235d30133a5ec9bac79cd615b6
|
##############################################################################
#
# Copyright (c) 2009-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################
"""3D gravity inversion using netCDF data"""
# Filename for input data
DATASET = '${inversion-file}'
# maximum depth (in meters)
DEPTH = ${max-depth}
# buffer zone above data (in meters; 6-10km recommended)
AIR = ${air-buffer}
# number of mesh elements in vertical direction (~1 element per 2km recommended)
NE_Z = ${vertical-mesh-elements}
# amount of horizontal padding (this affects end result, about 20% recommended)
PAD_X = ${x-padding}
PAD_Y = ${y-padding}
N_THREADS = ${n-threads}
####### Do not change anything below this line #######
import os
import subprocess
import sys
try:
from esys.downunder import *
from esys.escript import unitsSI as U
from esys.weipa import saveSilo
except ImportError:
line=["/opt/escript/bin/run-escript","-t" + str(N_THREADS)]+sys.argv
ret=subprocess.call(line)
sys.exit(ret)
def saveAndUpload(fn, **args):
saveSilo(fn, **args)
subprocess.call(["cloud", "upload", fn, fn, "--set-acl=public-read"])
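# Input gravity anomaly values are interpreted in units of 1e-6 m/s^2
# (micrometres per second squared); scale_factor rescales them to SI units.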
DATA_UNITS = 1e-6 * U.m/(U.sec**2)
source=NetCdfData(DataSource.GRAVITY, DATASET, scale_factor=DATA_UNITS)
db=DomainBuilder()
db.addSource(source)
db.setVerticalExtents(depth=DEPTH, air_layer=AIR, num_cells=NE_Z)
db.setFractionalPadding(PAD_X, PAD_Y)
db.fixDensityBelow(depth=DEPTH)
inv=GravityInversion()
inv.setup(db)
g, chi = db.getGravitySurveys()[0]
density=inv.run()
saveAndUpload('result.silo', gravity_anomaly=g, gravity_weight=chi, density=density)
print("Results saved in result.silo")
# Visualise result.silo using VisIt
import visit
visit.LaunchNowin()
saveatts = visit.SaveWindowAttributes()
saveatts.fileName = 'result-visit.png'
saveatts.family = 0
saveatts.width = 1024
saveatts.height = 768
saveatts.resConstraint = saveatts.NoConstraint
saveatts.outputToCurrentDirectory = 1
visit.SetSaveWindowAttributes(saveatts)
visit.OpenDatabase('result.silo')
visit.AddPlot('Contour', 'density')
c=visit.ContourAttributes()
c.colorType=c.ColorByColorTable
c.colorTableName = "hot"
visit.SetPlotOptions(c)
visit.DrawPlots()
v=visit.GetView3D()
v.viewNormal=(-0.554924, 0.703901, 0.443377)
v.viewUp=(0.272066, -0.3501, 0.896331)
visit.SetView3D(v)
visit.SaveWindow()
subprocess.call(["cloud", "upload", "result-visit.png", "result-visit.png", "--set-acl=public-read"])
visit.DeleteAllPlots()
visit.CloseDatabase('result.silo')
|
bencaradocdavies/vgml
|
src/main/resources/org/auscope/portal/server/scriptbuilder/templates/escript-gravity.py
|
Python
|
gpl-3.0
| 2,927
|
[
"NetCDF",
"VisIt"
] |
25b71368cef769a0c5dbdb7ae163dca20e7e4de8ad592c0a6fbbdc6ec0d60780
|
#!/usr/bin/env python
# provide a list of read sets and reference genomes
# generate a bias matrix
# python calculate_bias_matrix target_file < config
# config is read from stdin, one comma-separated pair per line, of the form
# sra,fasta
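# example config (hypothetical file names):
#     SRR000001.fastq,reference_a.fasta
#     SRR000002.fastq,reference_b.fasta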
import itertools
import sys
import bias
BAM_TO_SAM="samtools view -h %s"
BEDTOOLS="bedtools"
BOWTIE_PATH="bowtie2"
BWA_PATH="/mnt/work/reference-bias/tools/bwa-0.7.12/bwa"
#BWA_PATH="bwa"
MAUVE_PATH="progressiveMauve"
SAMTOOLS="samtools"
def bias_matrix( refs, log, out ):
out.write( 'Donor\tReference\tSRA\tLow\tMid\tHigh\n' )
for donor, reference in itertools.product( refs, repeat=2 ):
log.write( 'Calculating donor {0} reference {1}\n'.format( donor, reference ) )
calculator = bias.Calculator( BWA_PATH, BOWTIE_PATH, MAUVE_PATH, BAM_TO_SAM, log, log )
low, mid, high = calculator.calculate( donor=donor[1], reference=reference[1], job=None, stage=None, tmpdir='./tmp', align='bwa', donorbam=None, donorsam=None, fastq=donor[0] )
#low, mid, high = 1, 2, 3
out.write( '{0}\t{1}\t{2}\t{3:.1f}\t{4:.1f}\t{5:.1f}\n'.format( donor[1], reference[1], donor[0], low, mid, high ) )
if __name__ == '__main__':
refs = []
for line in sys.stdin:
if line.startswith('#'):
continue
sra, ref = [ x.strip() for x in line.strip().split(',') ]
refs.append( (sra, ref) )
target_file = sys.argv[1]
    bias_matrix( refs, sys.stderr, open( target_file, 'w' ) )
|
supernifty/reference-bias
|
bin/calculate_bias_matrix.py
|
Python
|
apache-2.0
| 1,410
|
[
"BWA"
] |
cefc832249a29118a8ee35dd6d467d383d4caf67c8d8d21c52fcb2d2400595f7
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for probability densities."""
import abc
import pickle
from annealed_flow_transport import train_vae
import annealed_flow_transport.aft_types as tp
import annealed_flow_transport.cox_process_utils as cp_utils
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.scipy.linalg as slinalg
from jax.scipy.special import logsumexp
from jax.scipy.stats import multivariate_normal
from jax.scipy.stats import norm
import numpy as np
import tensorflow_datasets as tfds
# TypeDefs
NpArray = np.ndarray
Array = jnp.ndarray
ConfigDict = tp.ConfigDict
class LogDensity(metaclass=abc.ABCMeta):
"""Abstract base class from which all log densities should inherit."""
def __init__(self, config: ConfigDict, num_dim: int):
self._check_constructor_inputs(config, num_dim)
self._config = config
self._num_dim = num_dim
@abc.abstractmethod
def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
"""Check the config and number of dimensions of the class.
Will typically raise Assertion like errors.
Args:
config: Configuration for the log density.
num_dim: Number of dimensions expected for the density.
"""
def __call__(self, x: Array) -> Array:
"""Evaluate the log density with automatic shape checking.
This calls evaluate_log_density which needs to be implemented
in derived classes.
Args:
x: Array of shape (num_batch, num_dim) containing input points.
Returns:
Array of shape (num_batch,) with corresponding log densities.
"""
self._check_input_shape(x)
output = self.evaluate_log_density(x)
self._check_output_shape(x, output)
return output
@abc.abstractmethod
def evaluate_log_density(self, x: Array) -> Array:
"""Evaluate the log density.
Args:
x: Array of shape (num_batch, num_dim) containing input to log density
Returns:
Array of shape (num_batch,) containing values of log densities.
"""
def _check_input_shape(self, vector_in: Array):
chex.assert_shape(vector_in, (None, self._num_dim))
def _check_output_shape(self, vector_in: Array, vector_out: Array):
num_batch = vector_in.shape[0]
chex.assert_shape(vector_out, (num_batch,))
def _check_members_types(self, config: ConfigDict, expected_members_types):
for elem, elem_type in expected_members_types:
if elem not in config:
raise ValueError("LogDensity config element not found: ", elem)
if not isinstance(config[elem], elem_type):
msg = "LogDensity config element " + elem + " is not of type " + str(
elem_type)
raise TypeError(msg)
def _check_expected_num_dim(self,
num_dim: int,
expected_num_dim: int,
class_name: str):
"""In the case where num_dim has an expected static value, confirm this."""
if expected_num_dim != num_dim:
msg = "num_dim is expected to be "+str(expected_num_dim)
msg += " for density "+class_name
raise ValueError(msg)
class NormalDistribution(LogDensity):
"""A univariate normal distribution with configurable scale and location.
num_dim should be 1 and config should include scalars "loc" and "scale"
"""
def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
self._check_expected_num_dim(num_dim, 1, type(self).__name__)
expected_members_types = [("loc", float),
("scale", float),
]
self._check_members_types(config, expected_members_types)
def evaluate_log_density(self, x: Array) -> Array:
output = norm.logpdf(x,
loc=self._config.loc,
scale=self._config.scale)[:, 0]
return output
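# A minimal usage sketch (assumes an attribute-style config such as
# ml_collections.ConfigDict, which also supports `name in config` checks):
#     config = ConfigDict({'loc': 0.0, 'scale': 1.0})
#     density = NormalDistribution(config, num_dim=1)
#     log_probs = density(jnp.zeros((4, 1)))  # shape (4,)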
class MultivariateNormalDistribution(LogDensity):
"""A normalized multivariate normal distribution.
Each element of the mean vector has the same value config.shared_mean
Each element of the diagonal covariance matrix has value config.diagonal_cov
"""
def _check_constructor_inputs(self, config: ConfigDict, unused_dim: int):
expected_members_types = [("shared_mean", float),
("diagonal_cov", float)
]
self._check_members_types(config, expected_members_types)
def evaluate_log_density(self, x: Array) -> Array:
mean = jnp.ones(self._num_dim) * self._config.shared_mean
cov = jnp.diag(jnp.ones(self._num_dim) * self._config.diagonal_cov)
output = multivariate_normal.logpdf(x,
mean=mean,
cov=cov)
return output
class FunnelDistribution(LogDensity):
"""The funnel distribution from https://arxiv.org/abs/physics/0009028.
num_dim should be 10. config is unused in this case.
"""
def _check_constructor_inputs(self, unused_config: ConfigDict, num_dim: int):
self._check_expected_num_dim(num_dim, 10, type(self).__name__)
def evaluate_log_density(self, x: Array) -> Array:
def unbatched(x):
v = x[0]
log_density_v = norm.logpdf(v,
loc=0.,
scale=3.)
variance_other = jnp.exp(v)
other_dim = self._num_dim - 1
cov_other = jnp.eye(other_dim) * variance_other
mean_other = jnp.zeros(other_dim)
log_density_other = multivariate_normal.logpdf(x[1:],
mean=mean_other,
cov=cov_other)
chex.assert_equal_shape([log_density_v, log_density_other])
return log_density_v + log_density_other
output = jax.vmap(unbatched)(x)
return output
class LogGaussianCoxPines(LogDensity):
"""Log Gaussian Cox process posterior in 2D for pine saplings data.
This follows Heng et al 2020 https://arxiv.org/abs/1708.08396 .
config.file_path should point to a csv file of num_points columns
  and 2 rows containing the Finnish pines data.
config.use_whitened is a boolean specifying whether or not to use a
reparameterization in terms of the Cholesky decomposition of the prior.
See Section G.4 of https://arxiv.org/abs/2102.07501 for more detail.
The experiments in the paper have this set to False.
num_dim should be the square of the lattice sites per dimension.
So for a 40 x 40 grid num_dim should be 1600.
"""
def __init__(self,
config: ConfigDict,
num_dim: int):
super().__init__(config, num_dim)
# Discretization is as in Controlled Sequential Monte Carlo
# by Heng et al 2017 https://arxiv.org/abs/1708.08396
self._num_latents = num_dim
self._num_grid_per_dim = int(np.sqrt(num_dim))
bin_counts = jnp.array(
cp_utils.get_bin_counts(self.get_pines_points(config.file_path),
self._num_grid_per_dim))
self._flat_bin_counts = jnp.reshape(bin_counts, (self._num_latents))
# This normalizes by the number of elements in the grid
self._poisson_a = 1./self._num_latents
# Parameters for LGCP are as estimated in Moller et al, 1998
# "Log Gaussian Cox processes" and are also used in Heng et al.
self._signal_variance = 1.91
self._beta = 1./33
self._bin_vals = cp_utils.get_bin_vals(self._num_grid_per_dim)
def short_kernel_func(x, y):
return cp_utils.kernel_func(x, y, self._signal_variance,
self._num_grid_per_dim, self._beta)
self._gram_matrix = cp_utils.gram(short_kernel_func, self._bin_vals)
self._cholesky_gram = jnp.linalg.cholesky(self._gram_matrix)
self._white_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
2. * jnp.pi)
half_log_det_gram = jnp.sum(jnp.log(jnp.abs(jnp.diag(self._cholesky_gram))))
self._unwhitened_gaussian_log_normalizer = -0.5 * self._num_latents * jnp.log(
2. * jnp.pi) - half_log_det_gram
# The mean function is a constant with value mu_zero.
self._mu_zero = jnp.log(126.) - 0.5*self._signal_variance
if self._config.use_whitened:
self._posterior_log_density = self.whitened_posterior_log_density
else:
self._posterior_log_density = self.unwhitened_posterior_log_density
def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
expected_members_types = [("use_whitened", bool)]
self._check_members_types(config, expected_members_types)
num_grid_per_dim = int(np.sqrt(num_dim))
if num_grid_per_dim * num_grid_per_dim != num_dim:
msg = ("num_dim needs to be a square number for LogGaussianCoxPines "
"density.")
raise ValueError(msg)
if not config.file_path:
msg = "Please specify a path in config for the Finnish pines data csv."
raise ValueError(msg)
def get_pines_points(self, file_path):
"""Get the pines data points."""
with open(file_path, "rt") as input_file:
b = np.genfromtxt(input_file, delimiter=",")
return b
def whitened_posterior_log_density(self, white: Array) -> Array:
quadratic_term = -0.5 * jnp.sum(white**2)
prior_log_density = self._white_gaussian_log_normalizer + quadratic_term
latent_function = cp_utils.get_latents_from_white(white, self._mu_zero,
self._cholesky_gram)
log_likelihood = cp_utils.poisson_process_log_likelihood(
latent_function, self._poisson_a, self._flat_bin_counts)
return prior_log_density + log_likelihood
def unwhitened_posterior_log_density(self, latents: Array) -> Array:
white = cp_utils.get_white_from_latents(latents, self._mu_zero,
self._cholesky_gram)
prior_log_density = -0.5 * jnp.sum(
white * white) + self._unwhitened_gaussian_log_normalizer
log_likelihood = cp_utils.poisson_process_log_likelihood(
latents, self._poisson_a, self._flat_bin_counts)
return prior_log_density + log_likelihood
def evaluate_log_density(self, x: Array) -> Array:
return jax.vmap(self._posterior_log_density)(x)
class ChallengingTwoDimensionalMixture(LogDensity):
"""A challenging mixture of Gaussians in two dimensions.
num_dim should be 2. config is unused in this case.
"""
def _check_constructor_inputs(self, unused_config: ConfigDict, num_dim: int):
self._check_expected_num_dim(num_dim, 2, type(self).__name__)
def raw_log_density(self, x: Array) -> Array:
"""A raw log density that we will then symmetrize."""
mean_a = jnp.array([3.0, 0.])
mean_b = jnp.array([-2.5, 0.])
mean_c = jnp.array([2.0, 3.0])
means = jnp.stack((mean_a, mean_b, mean_c), axis=0)
cov_a = jnp.array([[0.7, 0.], [0., 0.05]])
cov_b = jnp.array([[0.7, 0.], [0., 0.05]])
cov_c = jnp.array([[1.0, 0.95], [0.95, 1.0]])
covs = jnp.stack((cov_a, cov_b, cov_c), axis=0)
log_weights = jnp.log(jnp.array([1./3, 1./3., 1./3.]))
l = jnp.linalg.cholesky(covs)
y = slinalg.solve_triangular(l, x[None, :] - means, lower=True, trans=0)
mahalanobis_term = -1/2 * jnp.einsum("...i,...i->...", y, y)
n = means.shape[-1]
normalizing_term = -n / 2 * np.log(2 * np.pi) - jnp.log(
l.diagonal(axis1=-2, axis2=-1)).sum(axis=1)
individual_log_pdfs = mahalanobis_term + normalizing_term
mixture_weighted_pdfs = individual_log_pdfs + log_weights
return logsumexp(mixture_weighted_pdfs)
def make_2d_invariant(self, log_density, x: Array) -> Array:
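    # Symmetrise: average the (unnormalized) densities of x and of x with its
    # two coordinates swapped, so the result is invariant to that swap.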
density_a = log_density(x)
    density_b = log_density(jnp.flip(x))
return jnp.logaddexp(density_a, density_b) - jnp.log(2)
def evaluate_log_density(self, x: Array) -> Array:
density_func = lambda x: self.make_2d_invariant(self.raw_log_density, x)
return jax.vmap(density_func)(x)
class AutoEncoderLikelihood(LogDensity):
"""Generative decoder log p(x,z| theta) as a function of latents z.
This evaluates log p(x,z| theta) = log p(x, z| theta ) + log p(z) for a VAE.
Here x is an binarized MNIST Image, z are real valued latents, theta denotes
the generator neural network parameters.
Since x is fixed and z is a random variable this is the log of an unnormalized
z density p(x, z | theta)
The normalizing constant is a marginal p(x | theta) = int p(x, z | theta) dz.
The normalized target density is the posterior over latents p(z|x, theta).
The likelihood uses a pretrained generator neural network.
It is contained in a pickle file specifed by config.params_filesname
A script producing such a pickle file can be found in train_vae.py
The resulting pretrained network used in the AFT paper
can be found at data/vae.pickle
The binarized MNIST test set image used is specfied by config.image_index
"""
def __init__(self, config: ConfigDict, num_dim: int):
super().__init__(config, num_dim)
self._vae_params = self._get_vae_params(config.params_filename)
test_batch_size = 1
test_ds = train_vae.load_dataset(tfds.Split.TEST, test_batch_size)
for unused_index in range(self._config.image_index):
unused_batch = next(test_ds)
self._test_image = next(test_ds)["image"]
assert self._test_image.shape[0] == 1 # Batch size needs to be 1.
assert self._test_image.shape[1:] == train_vae.MNIST_IMAGE_SHAPE
self.entropy_eval = hk.transform(self.cross_entropy_eval_func)
def _check_constructor_inputs(self, config: ConfigDict, num_dim: int):
self._check_expected_num_dim(num_dim, 30, type(self).__name__)
    expected_members_types = [("params_filename", str),
                              ("image_index", int)
                              ]
    self._check_members_types(config, expected_members_types)
num_mnist_test = 10000
in_range = config.image_index >= 0 and config.image_index < num_mnist_test
if not in_range:
msg = "VAE image_index must be greater than or equal to zero "
msg += "and strictly less than "+str(num_mnist_test)+"."
raise ValueError(msg)
def _get_vae_params(self, ckpt_filename):
with open(ckpt_filename, "rb") as f:
vae_params = pickle.load(f)
return vae_params
def cross_entropy_eval_func(self, data: Array, latent: Array) -> Array:
"""Evaluate the binary cross entropy for given latent and data.
Needs to be called within a Haiku transform.
Args:
data: Array of shape (1, image_shape)
latent: Array of shape (num_latent_dim,)
Returns:
Array, value of binary cross entropy for single data point in question.
"""
chex.assert_rank(latent, 1)
chex.assert_rank(data, 4) # Shape should be (1, 28, 28, 1) hence rank 4.
vae = train_vae.ConvVAE()
# New axis here required for batch size = 1 for VAE compatibility.
batch_latent = latent[None, :]
logits = vae.decoder(batch_latent)
chex.assert_equal_shape([logits, data])
return train_vae.binary_cross_entropy_from_logits(logits, data)
def log_prior(self, latent: Array) -> Array:
"""Latent shape (num_dim,) -> standard multivariate log density."""
chex.assert_rank(latent, 1)
log_norm_gaussian = -0.5*self._num_dim * jnp.log(2.*jnp.pi)
data_term = - 0.5 * jnp.sum(jnp.square(latent))
return data_term + log_norm_gaussian
def total_log_probability(self, latent: Array) -> Array:
chex.assert_rank(latent, 1)
log_prior = self.log_prior(latent)
dummy_rng_key = 0
# Data point log likelihood is negative of loss for batch size of 1.
log_likelihood = -1. * self.entropy_eval.apply(
self._vae_params, dummy_rng_key, self._test_image, latent)
total_log_probability = log_prior + log_likelihood
return total_log_probability
def evaluate_log_density(self, x: Array) -> Array:
return jax.vmap(self.total_log_probability)(x)
|
deepmind/annealed_flow_transport
|
annealed_flow_transport/densities.py
|
Python
|
apache-2.0
| 16,333
|
[
"Gaussian"
] |
cf0ea6415b5231b89c65bf1a0dbc88f261c0c14cf17a6944bcae0f2ec7906e42
|
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from types import StringType
import os
import copy
from cStringIO import StringIO
import re
try:
from hashlib import md5
except:
from md5 import md5
import logging
import compiler
from compiler.visitor import ASTVisitor
from options import (all_compile_options, add_compile_options,
get_compile_options, debug_options, speed_options,
pythonic_options)
escaped_subst = re.compile('@{{(!?[ a-zA-Z0-9_\.]*)}}')
# See http://www.quackit.com/javascript/javascript_reserved_words.cfm
JavaScript_Reserved_Words = frozenset((
'break',
'case',
'comment',
'continue',
'default',
'delete',
'do',
'else',
'export',
'for',
'function',
'if',
'import',
'in',
'label',
'new',
'return',
'switch',
'this',
'typeof',
'var',
'void',
'while',
'with',
))
ECMAScipt_Reserved_Words = frozenset((
'catch',
'class',
'const',
'debugger',
'enum',
'extends',
'finally',
'super',
'throw',
'try',
))
Java_Keywords = frozenset((# (Reserved by JavaScript)
'abstract',
'boolean',
'byte',
'char',
'double',
'false',
'final',
'float',
'goto',
'implements',
'instanceOf',
'int',
'interface',
'long',
'native',
'null',
'package',
'private',
'protected',
'public',
'short',
'static',
'synchronized',
'throws',
'transient',
'true',
))
Other_JavaScript_Keywords = frozenset((
'Anchor',
'Area',
'Array',
'Boolean',
'Button',
'Checkbox',
'Date',
'Document',
'Element',
'FileUpload',
'Form',
'Frame',
'Function',
'Hidden',
'History',
'Image',
'Infinity',
'JavaArray',
'JavaClass',
'JavaObject',
'JavaPackage',
'Link',
'Location',
'Math',
'MimeType',
'NaN',
'Navigator',
'Number',
'Object',
'Option',
'Packages',
'Password',
'Plugin',
'Radio',
'RegExp',
'Reset',
'Select',
'String',
'Submit',
'Text',
'Textarea',
'Window',
'alert',
'arguments',
'assign',
'blur',
'callee',
'caller',
'captureEvents',
'clearInterval',
'clearTimeout',
'close',
'closed',
'confirm',
'constructor',
'defaultStatus',
'document',
'escape',
'eval',
'find',
'focus',
'frames',
'getClass',
'history',
'home',
'innerHeight',
'innerWidth',
'isFinite',
'isNan',
'java',
'length',
'location',
'locationbar',
'menubar',
'moveBy',
'moveTo',
'name',
'navigate',
'navigator',
'netscape',
'onBlur',
'onError',
'onFocus',
'onLoad',
'onUnload',
'open',
'opener',
'outerHeight',
'outerWidth',
'pageXoffset',
'pageYoffset',
'parent',
'parseFloat',
'parseInt',
'personalbar',
'print',
'prompt',
'prototype',
'ref',
'releaseEvents',
'resizeBy',
'resizeTo',
'routeEvent',
'scroll',
'scrollBy',
'scrollTo',
'scrollbars',
'self',
'setInterval',
'setTimeout',
'status',
'statusbar',
'stop',
'sun',
'taint',
'toString',
'toolbar',
'top',
'unescape',
'untaint',
'unwatch',
'valueOf',
'watch',
'window',
))
PYJSLIB_BUILTIN_FUNCTIONS=frozenset((
"__import__",
"abs",
"all",
"any",
"bool",
"callable",
"chr",
"cmp",
"delattr",
"dir",
"divmod",
"enumerate",
"filter",
"float",
"format",
"getattr",
"hasattr",
"hash",
"hex",
"isinstance",
"issubclass",
"iter",
"len",
"map",
"max",
"min",
"oct",
"open",
"ord",
"pow",
"range",
"reduce",
"repr",
"reversed",
"round",
"setattr",
"sorted",
"staticmethod",
"str",
"sum",
"super",
"type",
"xrange",
"zip",
# internal mappings needed
"__empty_dict",
"next_hash_id",
"__hash",
"wrapped_next",
"__iter_prepare",
"__wrapped_next",
"printFunc",
"debugReport",
"_isinstance",
"op_add",
"op_sub",
"isObject",
"toJSObjects",
"_errorMapping",
"TryElse",
"sprintf",
"get_pyjs_classtype",
"isUndefined",
"_create_class",
"_del",
"op_is",
"op_eq",
"op_or",
"op_and",
"op_uadd",
"op_usub",
"op_mul",
"op_div",
"op_truediv",
"op_pow",
"op_invert",
"op_bitshiftleft",
"op_bitshiftright",
"op_bitand2",
"op_bitand",
"op_bitxor",
"op_bitxor2",
"op_bitor2",
"op_bitor",
"op_floordiv",
"op_mod",
"__op_add",
"__op_sub",
"__setslice",
"slice",
"__delslice",
"___import___",
"__import_all__",
"_globals",
"_handle_exception",
))
PYJSLIB_BUILTIN_CLASSES=[
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"Exception",
"GeneratorExit",
"ImportError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"NameError",
"NotImplemented", # is in fact an instance
"NotImplementedError",
"NotImplementedType",
"RuntimeError",
"StandardError",
"StopIteration",
"TypeError",
"ValueError",
"ZeroDivisionError",
"basestring",
"dict",
"frozenset",
"int",
"list",
"long",
"object",
"property",
"set",
"tuple",
]
PYJSLIB_BUILTIN_MAPPING = {\
'True' : 'true',
'False': 'false',
'None': 'null',
}
SCOPE_KEY = 0
BIND_TYPES_NUMERIC = {
"func": 0,
"bound": 1,
"class": 2,
"static": 3,
}
# Variable names that should be remapped in functions/methods
# arguments -> arguments_
# arguments_ -> arguments__
# etc.
# arguments is one of Other_JavaScript_Keywords, but is used
# in function/method initialization and therefore forbidden
pyjs_vars_remap_names = ['arguments',
'final', 'char'] # to pass lint
pyjs_vars_remap = {}
for a in pyjs_vars_remap_names:
pyjs_vars_remap[a] = '$$' + a
for a in JavaScript_Reserved_Words:
pyjs_vars_remap[a] = '$$' + a
for a in ECMAScipt_Reserved_Words:
pyjs_vars_remap[a] = '$$' + a
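# e.g. pyjs_vars_remap['arguments'] == '$$arguments' and
#      pyjs_vars_remap['while'] == '$$while'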
# Attributes that should be remapped in classes
pyjs_attrib_remap_names = [\
'prototype', 'call', 'apply', 'constructor',
# Specifically for Chrome, which doesn't set the name attribute of a _function_
# http://code.google.com/p/chromium/issues/detail?id=12871
'name',
# collisions between javascript/python
'split', 'replace',
]
pyjs_attrib_remap = {}
for a in pyjs_attrib_remap_names:
pyjs_attrib_remap[a] = '$$' + a
for a in JavaScript_Reserved_Words:
pyjs_attrib_remap[a] = '$$' + a
for a in ECMAScipt_Reserved_Words:
pyjs_attrib_remap[a] = '$$' + a
def bracket_fn(s):
return s # "(%s)" % s
# pass in the compiler module (lib2to3 pgen or "standard" python one)
# and patch transformer. see http://bugs.python.org/issue6978
def monkey_patch_broken_transformer(compiler):
if compiler.__name__ != 'compiler':
return # don't patch pgen.lib2to3.compiler.transformer!
# assumes that compiler.transformer imports all these
extractLineNo = compiler.transformer.extractLineNo
token = compiler.transformer.token
symbol = compiler.transformer.symbol
Subscript = compiler.transformer.Subscript
Tuple = compiler.transformer.Tuple
Ellipsis = compiler.transformer.Ellipsis
Sliceobj = compiler.transformer.Sliceobj
# Bugfix compiler.transformer.Transformer.com_subscriptlist
def com_subscriptlist(self, primary, nodelist, assigning):
# slicing: simple_slicing | extended_slicing
# simple_slicing: primary "[" short_slice "]"
# extended_slicing: primary "[" slice_list "]"
# slice_list: slice_item ("," slice_item)* [","]
# backwards compat slice for '[i:j]'
if len(nodelist) == 2:
sub = nodelist[1]
if (sub[1][0] == token.COLON or \
(len(sub) > 2 and sub[2][0] == token.COLON)) and \
sub[-1][0] != symbol.sliceop:
return self.com_slice(primary, sub, assigning)
subscripts = []
for i in range(1, len(nodelist), 2):
subscripts.append(self.com_subscript(nodelist[i]))
if len(nodelist) > 2:
            tuplesub = [sub for sub in subscripts
                        if not (isinstance(sub, Ellipsis) or
                                isinstance(sub, Sliceobj))]
            if len(tuplesub) == len(subscripts):
subscripts = [Tuple(subscripts)]
return Subscript(primary, assigning, subscripts,
lineno=extractLineNo(nodelist))
compiler.transformer.Transformer.com_subscriptlist = com_subscriptlist
re_return = re.compile(r'\breturn\b')
class __Pyjamas__(object):
console = "console"
native_js_funcs = []
@classmethod
def register_native_js_func(cls, name, func):
def native(self, translator, node, current_klass, is_statement=False):
if len(node.args) != 1:
raise TranslationError(
"%s function requires one argument" % name,
node.node)
if ( isinstance(node.args[0], translator.ast.Const)
and isinstance(node.args[0].value, str)
):
translator.ignore_debug = True
unescape = lambda content: translator.translate_escaped_names(content, current_klass)
converted = func(node.args[0].value, unescape=unescape, translator=translator, current_klass=current_klass, is_statement=is_statement)
return converted, re_return.search(converted) is not None
else:
raise TranslationError(
"%s function only supports constant strings" % name,
node.node)
cls.native_js_funcs.append(name)
setattr(cls, name, native)
def wnd(self, translator, node, *args, **kwargs):
if len(node.args) != 0:
raise TranslationError(
"wnd function doesn't support arguments",
node.node)
translator.ignore_debug = True
return '$wnd', False
def doc(self, translator, node, *args, **kwargs):
if len(node.args) != 0:
raise TranslationError(
"doc function doesn't support arguments",
node.node)
translator.ignore_debug = True
return '$doc', False
def jsinclude(self, translator, node, *args, **kwargs):
if len(node.args) != 1:
raise TranslationError(
"jsinclude function requires one argument",
node.node)
if ( isinstance(node.args[0], translator.ast.Const)
and isinstance(node.args[0].value, str)
):
try:
data = open(node.args[0].value, 'r').read()
except IOError, e:
raise TranslationError(
"Cannot include file '%s': %s" % (node.args[0].value, e), node.node)
translator.ignore_debug = True
return data, False
else:
raise TranslationError(
"jsinclude function only supports constant strings",
node.node)
def jsimport(self, translator, node, *args, **kwargs):
# jsimport(path, mode, location)
# mode = [default|static|dynamic] (default: depends on build argument -m)
# location = [early|middle|late] (only relevant for static)
if len(node.args) == 0 or len(node.args) > 3:
raise TranslationError(
"jsimport function requires at least one, and at most three arguments",
node.node)
for arg in node.args:
if not isinstance(arg, translator.ast.Const):
raise TranslationError(
"jsimport function only supports constant arguments",
node.node)
if not isinstance(node.args[0].value, str):
raise TranslationError(
"jsimport path argument must be a string",
node.node)
path = node.args[0].value
if len(node.args) < 2:
mode = 'default'
else:
if isinstance(node.args[1].value, str):
mode = node.args[1].value
else:
raise TranslationError(
"jsimport path argument must be a string",
node.node)
if not mode in ['default', 'static', 'dynamic']:
raise TranslationError(
"jsimport mode argument must be default, static or dynamic",
node.node)
if len(node.args) < 3:
location = 'middle'
else:
if isinstance(node.args[2].value, str):
location = node.args[2].value
else:
raise TranslationError(
"jsimport path argument must be a string",
node.node)
if not location in ['early', 'middle', 'late']:
raise TranslationError(
"jsimport location argument must be early, middle or late",
node.node)
translator.add_imported_js(path, mode, location)
translator.ignore_debug = True
return '', False
def debugger(self, translator, node, *args, **kwargs):
if len(node.args) != 0:
raise TranslationError(
"debugger function doesn't support arguments",
node.node)
translator.ignore_debug = True
return 'debugger', False
def setCompilerOptions(self, translator, node, *args, **kwargs):
global speed_options, pythonic_options
for arg in node.args:
if not isinstance(arg, translator.ast.Const) or not isinstance(arg.value, str):
raise TranslationError(
"jsimport function only supports constant string arguments",
node.node)
option = arg.value
if translator.decorator_compiler_options.has_key(option):
for var, val in translator.decorator_compiler_options[option]:
setattr(translator, var, val)
elif option == "Speed":
for var in speed_options:
setattr(translator, var, speed_options[var])
elif option == "Strict":
for var in pythonic_options:
setattr(translator, var, pythonic_options[var])
else:
raise TranslationError(
"setCompilerOptions invalid option '%s'" % option,
node.node)
translator.ignore_debug = True
return '', False
def INT(self, translator, node, *args, **kwargs):
if len(node.args) != 1:
raise TranslationError(
"INT function requires one argument",
node.node)
expr = translator.expr(node.args[0], None)
opt_var = translator.decorator_compiler_options['NumberClasses'][0][0]
if getattr(translator, opt_var):
return "new $p['int'](%s)" % expr, False
return expr, False
def native_js_func(func):
__Pyjamas__.register_native_js_func(func.__name__, func)
return func
@native_js_func
def JS(content, unescape, **kwargs):
return unescape(content)
__pyjamas__ = __Pyjamas__()
class __Future__(object):
def division(self, translator):
translator.future_division = True
__future__ = __Future__()
# This is taken from the django project.
# Escape every ASCII character with a value less than 32.
JS_ESCAPES = (
('\\', r'\x5C'),
('\'', r'\x27'),
('"', r'\x22'),
('>', r'\x3E'),
('<', r'\x3C'),
('&', r'\x26'),
(';', r'\x3B')
) + tuple([('%c' % z, '\\x%02X' % z) for z in range(32)])
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in JS_ESCAPES:
value = value.replace(bad, good)
return value
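# e.g. escapejs('<a href="x">') returns r'\x3Ca href=\x22x\x22\x3E'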
class YieldVisitor(ASTVisitor):
has_yield = False
def visitYield(self, node, *args):
self.has_yield = True
class GeneratorExitVisitor(YieldVisitor):
has_yield = False
def visitReturn(self, node, *args):
self.has_yield = True
class Klass:
klasses = {}
def __init__(self, name, name_scope):
self.name = name
self.name_scope = name_scope
self.klasses[name] = self
self.functions = set()
def set_base(self, base_name):
self.base = self.klasses.get(base_name)
def add_function(self, function_name):
self.functions.add(function_name)
class TranslationError(Exception):
def __init__(self, msg, node='', module_name=''):
if node:
lineno = node.lineno
else:
lineno = "Unknown"
self.msg = msg
self.node = node
self.module_name = module_name
self.lineno = lineno
Exception.__init__(self, "%s line %s:\n%s\n%s" % (module_name, lineno, msg, node))
def __str__(self):
return self.args[0]
def strip_py(name):
return name
class Translator(object):
decorator_compiler_options = {\
'Debug': [('debug', True)],
'noDebug': [('debug', False)],
'PrintStatements': [('print_statements', True)],
'noPrintStatements': [('print_statements', False)],
'FunctionArgumentChecking': [('function_argument_checking', True)],
'noFunctionArgumentChecking': [('function_argument_checking', False)],
'AttributeChecking': [('attribute_checking', True)],
'noAttributeChecking': [('attribute_checking', False)],
'GetattrSupport': [('getattr_support', True)],
'noGetattrSupport': [('getattr_support', False)],
'BoundMethods': [('bound_methods', True)],
'noBoundMethods': [('bound_methods', False)],
'Descriptors': [('descriptors', True)],
'noDescriptors': [('descriptors', False)],
'SourceTracking': [('source_tracking', True)],
'noSourceTracking': [('source_tracking', False)],
'LineTracking': [('line_tracking', True)],
'noLineTracking': [('line_tracking', False)],
'StoreSource': [('store_source', True)],
'noStoreSource': [('store_source', False)],
'noInlineBool': [('inline_bool', False)],
'InlineBool': [('inline_bool', True)],
'noInlineLen': [('inline_len', False)],
'InlineLen': [('inline_len', True)],
'noInlineEq': [('inline_eq', False)],
'InlineEq': [('inline_eq', True)],
'noInlineCmp': [('inline_cmp', False)],
'InlineCmp': [('inline_cmp', True)],
'noInlineGetItem': [('inline_getitem', False)],
'InlineGetItem': [('inline_getitem', True)],
'noInlineCode': [('inline_bool', False),('inline_len', False),('inline_eq', False), ('inline_cmp', False), ('inline_getitem', False)],
'InlineCode': [('inline_bool', True),('inline_len', True),('inline_eq', True), ('inline_cmp', True), ('inline_getitem', True)],
'noOperatorFuncs': [('operator_funcs', False)],
'OperatorFuncs': [('operator_funcs', True)],
'noNumberClasses': [('number_classes', False)],
'NumberClasses': [('number_classes', True)],
}
def __init__(self, compiler,
module_name, module_file_name, src, mod, output,
dynamic=0, findFile=None, **kw):
monkey_patch_broken_transformer(compiler)
self.compiler = compiler
self.ast = compiler.ast
self.js_module_name = self.jsname("variable", module_name)
if module_name:
self.module_prefix = "$m."
else:
self.module_prefix = ""
self.module_name = module_name
src = src.replace("\r\n", "\n")
src = src.replace("\n\r", "\n")
src = src.replace("\r", "\n")
self.src = src.split("\n")
self.output = output
self.dynamic = dynamic
self.findFile = findFile
self.set_compile_options(kw)
# compile options
self.future_division = False
self.imported_modules = []
self.imported_js = []
self.is_class_definition = False
self.local_prefix = None
self.track_lines = {}
self.stacksize_depth = 0
self.option_stack = []
self.lookup_stack = [{}]
self.indent_level = 0
self.__unique_ids__ = {}
self.try_depth = -1
self.is_generator = False
self.generator_states = []
self.state_max_depth = len(self.generator_states)
self.constant_int = {}
self.constant_long = {}
self.top_level = True
PYJSLIB_BUILTIN_MAPPING['__file__'] = "'%s'" % module_file_name
self.w( self.spacing() + "/* start module: %s */" % module_name)
if not '.' in module_name:
#if module_name != self.jsname(module_name):
# raise TranslationError(
# "reserved word used for top-level module %r" % module_name,
# mod, self.module_name)
if self.js_module_name in ['pyjslib', 'sys']:
self.w( self.spacing() + 'var %s;' % self.js_module_name)
self.parent_module_name = None
else:
self.parent_module_name = '.'.join(module_name.split('.')[:-1])
if module_file_name.endswith('__init__.py'):
self.import_context = "'%s'" % module_name
self.relative_import_context = module_name
elif self.parent_module_name:
self.import_context = "'%s'" % self.parent_module_name
self.relative_import_context = self.parent_module_name
else:
self.import_context = "null"
self.relative_import_context = None
self.w( self.indent() + "$pyjs.loaded_modules['%s'] = function (__mod_name__) {" % module_name)
self.w( self.spacing() + "if($pyjs.loaded_modules['%s'].__was_initialized__) return $pyjs.loaded_modules['%s'];"% (module_name, module_name))
if self.parent_module_name:
self.w( self.spacing() + "if(typeof $pyjs.loaded_modules['%s'] == 'undefined' || !$pyjs.loaded_modules['%s'].__was_initialized__) @{{___import___}}('%s', null);"% (self.parent_module_name, self.parent_module_name, self.parent_module_name))
parts = self.js_module_name.split('.')
if len(parts) > 1:
self.w( self.spacing() + 'var %s = $pyjs.loaded_modules["%s"];' % (parts[0], module_name.split('.')[0]))
if self.js_module_name in ['pyjslib', 'sys']:
self.w( self.spacing() + 'var %s = %s = $pyjs.loaded_modules["%s"];' % (self.module_prefix[:-1], self.js_module_name, module_name,))
else:
self.w( self.spacing() + 'var %s = $pyjs.loaded_modules["%s"];' % (self.module_prefix[:-1], module_name,))
self.w( self.spacing() + self.module_prefix + '__repr__ = function() { return "<module: %s>"; };' % (module_name))
self.w( self.spacing() + self.module_prefix + "__was_initialized__ = true;")
self.w( self.spacing() + "if ((__mod_name__ === null) || (typeof __mod_name__ == 'undefined')) __mod_name__ = '%s';" % (module_name))
lhs = self.scopeName('__name__', 0, False)
self.w( self.spacing() + "%s = __mod_name__;" % (lhs))
if self.source_tracking:
self.w( self.spacing() + "%s__track_lines__ = new Array();" % self.module_prefix)
name = module_name.split(".")
if len(name) > 1:
jsname = self.jsname('variable', name[-1])
self.w( self.spacing() + "$pyjs.loaded_modules['%s']['%s'] = $pyjs.loaded_modules['%s'];" % (
'.'.join(name[:-1]), jsname, module_name,
))
if self.attribute_checking and not module_name in ['sys', 'pyjslib']:
attribute_checking = True
self.w( self.indent() + 'try {')
else:
attribute_checking = False
save_output = self.output
self.output = StringIO()
mod.lineno = 1
self.track_lineno(mod, True)
for child in mod.node:
self.has_js_return = False
self.has_yield = False
self.is_generator = False
self.track_lineno(child)
assert self.top_level
if isinstance(child, self.ast.Function):
self._function(child, None)
elif isinstance(child, self.ast.Class):
self._class(child)
elif isinstance(child, self.ast.Import):
self._import(child, None, True)
elif isinstance(child, self.ast.From):
self._from(child, None, True)
elif isinstance(child, self.ast.Discard):
self._discard(child, None)
elif isinstance(child, self.ast.Assign):
self._assign(child, None)
elif isinstance(child, self.ast.AugAssign):
self._augassign(child, None)
elif isinstance(child, self.ast.If):
self._if(child, None)
elif isinstance(child, self.ast.For):
self._for(child, None)
elif isinstance(child, self.ast.While):
self._while(child, None)
elif isinstance(child, self.ast.Subscript):
self._subscript_stmt(child, None)
elif isinstance(child, self.ast.Global):
self._global(child, None)
elif isinstance(child, self.ast.Printnl):
self._print(child, None)
elif isinstance(child, self.ast.Print):
self._print(child, None)
elif isinstance(child, self.ast.TryExcept):
self._tryExcept(child, None)
elif isinstance(child, self.ast.TryFinally):
self._tryFinally(child, None)
elif isinstance(child, self.ast.Raise):
self._raise(child, None)
elif isinstance(child, self.ast.Stmt):
self._stmt(child, None, True)
elif isinstance(child, self.ast.AssAttr):
self._assattr(child, None)
elif isinstance(child, self.ast.AssName):
self._assname(child, None)
elif isinstance(child, self.ast.AssTuple):
for node in child.nodes:
self._stmt(node, None)
elif isinstance(child, self.ast.Slice):
self.w( self.spacing() + self._slice(child, None))
else:
raise TranslationError(
"unsupported type (in __init__)",
child, self.module_name)
captured_output = self.output.getvalue()
self.output = save_output
if self.source_tracking and self.store_source:
for l in self.track_lines.keys():
                self.w( self.spacing() + '''%s__track_lines__[%d] = "%s";''' % (self.module_prefix, l, self.track_lines[l].replace('"', '\\"')), translate=False)
self.w( self.local_js_vars_decl([]))
if captured_output.find("@CONSTANT_DECLARATION@") >= 0:
captured_output = captured_output.replace("@CONSTANT_DECLARATION@", self.constant_decl())
else:
self.w( self.constant_decl())
if captured_output.find("@ATTRIB_REMAP_DECLARATION@") >= 0:
captured_output = captured_output.replace("@ATTRIB_REMAP_DECLARATION@", self.attrib_remap_decl())
self.w( captured_output, False)
if attribute_checking:
self.w( self.dedent() + "} catch ($pyjs_attr_err) {throw @{{_errorMapping}}($pyjs_attr_err);};")
self.w( self.spacing() + "return this;")
self.w( self.dedent() + "}; /* end %s */" % module_name)
self.w( "\n")
self.w( self.spacing() + "/* end module: %s */" % module_name)
self.w( "\n")
# print out the deps and check for wrong imports
if self.imported_modules:
self.w( '/*')
self.w( 'PYJS_DEPS: %s' % self.imported_modules)
self.w( '*/')
# print out the imported js
if self.imported_js:
self.w( '/*')
self.w( 'PYJS_JS: %s' % repr(self.imported_js))
self.w( '*/')
def set_compile_options(self, opts):
opts = dict(all_compile_options, **opts)
for opt, value in opts.iteritems():
if opt in all_compile_options:
setattr(self, opt, value)
else:
raise Exception("Translator got an unknown option %s" % opt)
self.ignore_debug = False
self.inline_bool = self.inline_code
self.inline_len = self.inline_code
self.inline_eq = self.inline_code
self.inline_cmp = self.inline_code
self.inline_getitem = self.inline_code
if self.number_classes:
self.operator_funcs = True
def w(self, txt, newline=True, output=None, translate=True):
if translate and txt:
txt = self.translate_escaped_names(txt, None) # TODO: current_klss
output = output or self.output
assert(isinstance(newline, bool))
if newline:
if txt is None:
print >> self.output
return
print >> self.output, txt
else:
print >> self.output, txt,
def uniqid(self, prefix = ""):
if not self.__unique_ids__.has_key(prefix):
self.__unique_ids__[prefix] = 0
self.__unique_ids__[prefix] += 1
return "%s%d" % (prefix, self.__unique_ids__[prefix])
def spacing(self):
return "\t" * self.indent_level
def indent(self):
spacing = self.spacing()
self.indent_level += 1
return spacing
def dedent(self):
if self.indent_level == 0:
raise TranslationError("Dedent error", None, self.module_name)
self.indent_level -= 1
return self.spacing()
def push_options(self):
self.option_stack.append((\
self.debug, self.print_statements, self.function_argument_checking,
self.attribute_checking, self.getattr_support, self.bound_methods, self.descriptors,
self.source_tracking, self.line_tracking, self.store_source,
self.inline_bool, self.inline_eq, self.inline_len, self.inline_cmp, self.inline_getitem,
self.operator_funcs, self.number_classes,
))
def pop_options(self):
(\
self.debug, self.print_statements, self.function_argument_checking,
self.attribute_checking, self.getattr_support, self.bound_methods, self.descriptors,
self.source_tracking, self.line_tracking, self.store_source,
self.inline_bool, self.inline_eq, self.inline_len, self.inline_cmp, self.inline_getitem,
self.operator_funcs, self.number_classes,
) = self.option_stack.pop()
def parse_decorators(self, node, funcname, current_class = None,
is_method = False, bind_type = None):
if node.decorators is None:
return False, False, '%s'
self.push_lookup()
self.add_lookup('variable', '%s', '%s')
code = '%s'
staticmethod = False
classmethod = False
lineno=node.lineno
if is_method:
bind_type = bind_type or "bound"
def add_callfunc(code, d, generic=True):
tnode = self.ast.CallFunc(d, [self.ast.Name('%s')],
star_args=None,
dstar_args=None,
lineno=lineno)
code = code % self._callfunc_code(tnode, None)
if is_method and (bind_type == "bound") and generic:
try:
bind_type_num = BIND_TYPES_NUMERIC[bind_type]
except KeyError:
raise TranslationError("Unknown bind type: %s" % bind_type, node)
code = "$pyjs__decorated_method('%(method_name)s', %(code)s, %(bind_type)s)" % \
{
"method_name": node.name,
"code": code,
"bind_type": bind_type_num
}
return code
for d in node.decorators:
if isinstance(d, self.ast.Getattr):
if isinstance(d.expr, self.ast.Name):
if d.expr.name == 'compiler':
raise TranslationError(
"The @compiler decorator is deprecated. Use from __pyjamas__ import setCompilerOptions", node, self.module_name)
if d.attrname in ("setter", "getter", "deleter"):
code = add_callfunc(code, d, generic=False)
else:
code = add_callfunc(code, d)
else:
code = add_callfunc(code, d)
elif isinstance(d, self.ast.Name):
if d.name == 'staticmethod':
staticmethod = True
elif d.name == 'classmethod':
classmethod = True
elif d.name == 'property':
code = add_callfunc(code, d, generic=False)
else:
code = add_callfunc(code, d)
else:
raise TranslationError(
"Unsupported decorator '%s'" % d, node, self.module_name)
self.pop_lookup()
if code != '%s':
code = code % "@{{staticmethod}}(%s)"
if staticmethod:
code = "@{{staticmethod}}(%s)" % code
return (staticmethod, classmethod, code)
    # Join a list into a variable with optional attributes
def attrib_join(self, splitted):
if not isinstance(splitted, list):
raise TranslationError("Invalid splitted attr '%s'" % splitted)
attr = []
if splitted[0][0] in ["'", '"']:
attr.append(splitted[0][1:-1])
else:
attr.append(splitted[0])
for word in splitted[1:]:
if word[0] in ["'", '"']:
word = word[1:-1]
if word in pyjs_attrib_remap:
attr.append("'%s'" % pyjs_attrib_remap[word])
elif word.find('(') >= 0:
print 'attrib_join:', splitted, attr, word
attr.append(word)
else:
attr.append("'%s'" % word)
if len(attr) == 1:
return attr[0]
return "%s%s" % (attr[0], ('[' + "][".join(attr[1:]) + ']'))
def vars_remap(self, word):
if word in pyjs_vars_remap:
return pyjs_vars_remap[word]
return word
# Map a word to a valid attribute
def attrib_remap(self, word):
attr = []
words = word.split('.')
if len(words) == 1:
if word in pyjs_attrib_remap:
return pyjs_attrib_remap[word]
return word
raise RuntimeError("attrib_remap %s" % words)
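    # The lookup stack is a list of per-scope dicts mapping a Python name
    # to a (name_type, pyname, jsname) tuple; push_lookup/pop_lookup
    # mirror entering and leaving a scope.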
def push_lookup(self, scope = None):
if scope is None:
scope = {}
self.lookup_stack.append(scope)
def pop_lookup(self):
return self.lookup_stack.pop()
def jsname(self, name_type, jsname):
words = jsname.split('.')
if name_type != 'builtin':
words[0] = self.vars_remap(words[0])
        if len(words) == 0:
            return words[0]  # note: unreachable, split('.') always yields at least one element
return self.attrib_join(words)
def add_lookup(self, name_type, pyname, jsname, depth = -1):
jsname = self.jsname(name_type, jsname)
if self.local_prefix is not None:
if jsname.find(self.local_prefix) != 0:
jsname = self.jsname(name_type, "%s.%s" % (self.local_prefix, jsname))
if self.lookup_stack[depth].has_key(pyname):
name_type = self.lookup_stack[depth][pyname][0]
if self.module_name != 'pyjslib' or pyname != 'int':
self.lookup_stack[depth][pyname] = (name_type, pyname, jsname)
return jsname
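    # lookup walks the scope stack from the innermost scope outward and
    # falls back to the pyjslib builtins; the trailing is_local flag is
    # true only for hits in the innermost non-module scope.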
def lookup(self, name):
# builtin
# import
# class
# function
# variable
name_type = None
pyname = name
jsname = None
max_depth = depth = len(self.lookup_stack) - 1
while depth >= 0:
if self.lookup_stack[depth].has_key(name):
name_type, pyname, jsname = self.lookup_stack[depth][name]
break
depth -= 1
if depth < 0:
if name in PYJSLIB_BUILTIN_FUNCTIONS:
name_type = 'builtin'
pyname = name
jsname = self.jsname("variable", "$p['%s']" % self.attrib_remap(name))
elif name in PYJSLIB_BUILTIN_CLASSES:
name_type = 'builtin'
pyname = name
if not self.number_classes:
if pyname in ['int', 'long']:
name = 'float_int'
jsname = self.jsname("variable", "$p['%s']" % self.attrib_remap(name))
elif PYJSLIB_BUILTIN_MAPPING.has_key(name):
name_type = 'builtin'
pyname = name
jsname = PYJSLIB_BUILTIN_MAPPING[name]
is_local = (name_type is not None) and \
(max_depth > 0) and (max_depth == depth)
#if self.create_locals:
# print "lookup", name_type, pyname, jsname, depth, is_local
#if self.create_locals and is_local and \
# self.is_local_name(jsname, pyname, name_type, []):
#if depth == max_depth and jsname is not None and name_type not in \
# ['builtin', '__pyjamas__', '__javascript__', 'global']:
# print "name_type", name_type, jsname
# jsname = "$l." + jsname
        return (name_type, pyname, jsname, depth, is_local)
def translate_escaped_names(self, txt, current_klass):
""" escape replace names
"""
l = escaped_subst.split(txt)
txt = l[0]
for i in xrange(1, len(l)-1, 2):
varname = l[i].strip()
if varname.startswith('!'):
txt += varname[1:]
else:
name_type, pyname, jsname, depth, is_local = self.lookup(varname)
if name_type is None:
substname = self.scopeName(varname, depth, is_local)
else:
substname = jsname
txt += substname
txt += l[i+1]
return txt
def scopeName(self, name, depth, local):
if local:
return name
while depth >= 0:
scopeName = self.lookup_stack[depth].get(SCOPE_KEY, None)
if scopeName is not None:
return scopeName + name
depth -= 1
return self.modpfx() + name
def attrib_remap_decl(self):
s = self.spacing()
lines = []
module_prefix = self.module_prefix
remap = pyjs_attrib_remap.keys()
remap.sort()
lines.append("%(s)svar attrib_remap = %(module_prefix)sattrib_remap = %(remap)s;" % locals())
remap = pyjs_vars_remap.keys()
remap.sort()
lines.append("%(s)svar var_remap = %(module_prefix)svar_remap = %(remap)s;" % locals())
return "\n".join(lines)
def constant_decl(self):
s = self.spacing()
lines = []
for name in self.constant_int:
lines.append("%(s)svar $constant_int_%(name)s = new $p['int'](%(name)s);" % locals())
for name in self.constant_long:
lines.append("%(s)svar $constant_long_%(name)s = new $p['long'](%(name)s);" % locals())
return "\n".join(lines)
def is_local_name(self, jsname, pyname, nametype, ignore_py_vars):
return ( not jsname.find('[') >= 0
and not pyname in ignore_py_vars
and not nametype in ['__pyjamas__', '__javascript__', 'global']
)
def local_js_vars_decl(self, ignore_py_vars):
names = []
for name in self.lookup_stack[-1].keys():
nametype = self.lookup_stack[-1][name][0]
pyname = self.lookup_stack[-1][name][1]
jsname = self.lookup_stack[-1][name][2]
if self.is_local_name(jsname, pyname, nametype, ignore_py_vars):
names.append(jsname)
if len(names) > 0:
return self.spacing() + "var %s;" % ','.join(names)
return ''
def add_imported_js(self, path, mode, location):
self.imported_js.append((path, mode, location))
def add_imported_module(self, importName):
names = importName.split(".")
if not importName in self.imported_modules:
self.imported_modules.append(importName)
if importName.endswith('.js'):
return
# Add all parent modules
_importName = ''
for name in names:
_importName += name
if not _importName in self.imported_modules:
self.imported_modules.append(_importName)
_importName += '.'
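    # The __inline_*_code_str templates below are JavaScript snippets with
    # %(...)s placeholders.  The replace() calls turn the 4-space indents
    # into tabs and prefix every continuation line with %(s)s, so the
    # emitted JS stays aligned with the current output indentation.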
__inline_bool_code_str = """\
((%(v)s=%(e)s) === null || %(v)s === false || %(v)s === 0 || %(v)s === '' ?
false :
(typeof %(v)s=='object'?
(typeof %(v)s.__nonzero__=='function'?
%(v)s.__nonzero__() :
(typeof %(v)s.__len__=='function'?
(%(v)s.__len__()>0 ?
true :
false) :
true ) ) :
true ) )"""
__inline_bool_code_str = __inline_bool_code_str.replace(" ", "\t").replace("\n", "\n%(s)s")
def inline_bool_code(self, e):
if self.stupid_mode:
return bracket_fn(e)
if self.inline_bool:
v = self.uniqid('$bool')
self.add_lookup('variable', v, v)
s = self.spacing()
return self.__inline_bool_code_str % locals()
return "$p['bool'](%(e)s)" % locals()
__inline_len_code_str1 = """((%(v)s=%(e)s) === null?%(zero)s:
(typeof %(v)s.__array != 'undefined' ? %(v)s.__array.length:
(typeof %(v)s.__len__ == 'function'?%(v)s.__len__():
(typeof %(v)s.length != 'undefined'?%(v)s.length:
@{{len}}(%(v)s)))))"""
__inline_len_code_str1 = __inline_len_code_str1.replace(" ", "\t").replace("\n", "\n%(s)s")
__inline_len_code_str2 = """((%(v)s=%(e)s) === null?%(zero)s:
(typeof %(v)s.__array != 'undefined' ? new $p['int'](%(v)s.__array.length):
(typeof %(v)s.__len__ == 'function'?%(v)s.__len__():
(typeof %(v)s.length != 'undefined'? new $p['int'](%(v)s.length):
@{{len}}(%(v)s)))))"""
__inline_len_code_str2 = __inline_len_code_str2.replace(" ", "\t").replace("\n", "\n%(s)s")
def inline_len_code(self, e):
if self.inline_len:
v = self.uniqid('$len')
self.add_lookup('variable', v, v)
zero = '0'
s = self.spacing()
if not self.number_classes:
return self.__inline_len_code_str1 % locals()
self.constant_int['0'] = 1
zero = "$constant_int_0"
return self.__inline_len_code_str2 % locals()
return "@{{len}}(%(e)s)" % locals()
__inline_eq_code_str = """((%(v1)s=%(e1)s)===(%(v2)s=%(e2)s)&&%(v1)s===null?true:
(%(v1)s===null?false:(%(v2)s===null?false:
((typeof %(v1)s=='object'||typeof %(v1)s=='function')&&typeof %(v1)s.__cmp__=='function'?%(v1)s.__cmp__(%(v2)s) === 0:
((typeof %(v2)s=='object'||typeof %(v2)s=='function')&&typeof %(v2)s.__cmp__=='function'?%(v2)s.__cmp__(%(v1)s) === 0:
%(v1)s==%(v2)s)))))"""
__inline_eq_code_str = __inline_eq_code_str.replace(" ", "\t").replace("\n", "\n%(s)s")
def inline_eq_code(self, e1, e2):
if self.inline_eq and not self.number_classes:
v1 = self.uniqid('$eq')
v2 = self.uniqid('$eq')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
return self.__inline_eq_code_str % locals()
return "@{{op_eq}}(%(e1)s, %(e2)s)" % locals()
__inline_cmp_code_str = """((%(v1)s=%(e1)s)===(%(v2)s=%(e2)s)?0:
(typeof %(v1)s==typeof %(v2)s && ((typeof %(v1)s == 'number')||(typeof %(v1)s == 'string')||(typeof %(v1)s == 'boolean'))?
(%(v1)s == %(v2)s ? 0 : (%(v1)s < %(v2)s ? -1 : 1)):
@{{cmp}}(%(v1)s, %(v2)s)))"""
__inline_cmp_code_str = __inline_cmp_code_str.replace(" ", "\t").replace("\n", "\n%(s)s")
def inline_cmp_code(self, e1, e2):
if self.inline_cmp:
v1 = self.uniqid('$cmp')
v2 = self.uniqid('$cmp')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
return self.__inline_cmp_code_str % locals()
return "@{{cmp}}(%(e1)s, %(e2)s)" % locals()
__inline_getitem_code_str = """(typeof (%(v1)s=%(e)s).__array != 'undefined'?
((typeof %(v1)s.__array[%(v2)s=%(i)s]) != 'undefined'?%(v1)s.__array[%(v2)s]:
%(v1)s.__getitem__(%(v2)s)):
%(v1)s.__getitem__(%(i)s))"""
__inline_getitem_code_str = __inline_getitem_code_str.replace(" ", "\t").replace("\n", "\n%(s)s")
def inline_getitem_code(self, e, i):
if self.inline_getitem:
v1 = self.uniqid('$')
self.add_lookup('variable', v1, v1)
v2 = self.uniqid('$')
self.add_lookup('variable', v2, v2)
s = self.spacing()
return self.__inline_getitem_code_str % locals()
return "%(e)s.__getitem__(%(i)s)" % locals()
def md5(self, node):
return md5(self.module_name + str(node.lineno) + repr(node)).hexdigest()
def track_lineno(self, node, module=False):
if self.source_tracking and node.lineno:
if module:
self.w( self.spacing() + "$pyjs.track.module='%s';" % self.module_name)
if self.line_tracking:
self.w( self.spacing() + "$pyjs.track.lineno=%d;" % node.lineno)
#self.w( self.spacing() + "if ($pyjs.track.module!='%s') debugger;" % self.module_name)
if self.store_source:
self.track_lines[node.lineno] = self.get_line_trace(node)
def track_call(self, call_code, lineno=None):
if not self.ignore_debug and self.debug and len(call_code.strip()) > 0:
dbg = self.uniqid("$pyjs_dbg_")
mod = self.module_name
s = self.spacing()
call_code = """\
(function(){try{try{$pyjs.in_try_except += 1;
%(s)sreturn %(call_code)s;
}finally{$pyjs.in_try_except-=1;}}catch(%(dbg)s_err){\
if (!@{{isinstance}}(%(dbg)s_err, @{{StopIteration}}))\
{@{{_handle_exception}}(%(dbg)s_err);}\
throw %(dbg)s_err;
}})()""" % locals()
return call_code
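    # Generators compile to a JS state machine: $generator_state records
    # where to resume, $yield_value/$exc pass values in and out, and
    # $genfunc re-enters the translated body at the saved state.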
__generator_code_str = """\
var $generator_state = [0], $generator_exc = [null], $yield_value = null, $exc = null, $is_executing=false;
var $generator = function () {};
$generator['next'] = function (noStop) {
%(src1)s
var $res;
$yield_value = $exc = null;
try {
$res = $generator['$genfunc']();
$is_executing=false;
if (typeof $res == 'undefined') {
if (noStop === true) {
$generator_state[0] = -1;
return;
}
throw @{{StopIteration}}();
}
} catch (e) {
%(src2)s
$is_executing=false;
$generator_state[0] = -1;
if (noStop === true && @{{isinstance}}(e, @{{StopIteration}})) {
return;
}
throw e;
}
return $res;
};
$generator['__iter__'] = function () {return $generator;};
$generator['send'] = function ($val) {
%(src1)s
$yield_value = $val;
$exc = null;
try {
var $res = $generator['$genfunc']();
if (typeof $res == 'undefined') throw @{{StopIteration}}();
} catch (e) {
%(src2)s
$generator_state[0] = -1;
$is_executing=false;
throw e;
}
$is_executing=false;
return $res;
};
$generator['$$throw'] = function ($exc_type, $exc_value) {
%(src1)s
$yield_value = null;
$exc=(typeof $exc_value == 'undefined' ? $exc_type() :
(@{{isinstance}}($exc_value, $exc_type)
? $exc_value : $exc_type($exc_value)));
try {
var $res = $generator['$genfunc']();
} catch (e) {
%(src2)s
$generator_state[0] = -1;
$is_executing=false;
throw (e);
}
$is_executing=false;
return $res;
};
$generator['close'] = function () {
%(src1)s
$yield_value = null;
$exc=@{{GeneratorExit}}();
try {
var $res = $generator['$genfunc']();
$is_executing=false;
if (typeof $res != 'undefined') throw @{{RuntimeError}}('generator ignored GeneratorExit');
} catch (e) {
%(src2)s
$generator_state[0] = -1;
$is_executing=false;
if (@{{isinstance}}(e, @{{StopIteration}}) || @{{isinstance}}(e, @{{GeneratorExit}})) return null;
throw (e);
}
return null;
};
$generator['$genfunc'] = function () {
var $yielding = false;
if ($is_executing) throw @{{ValueError}}('generator already executing');
$is_executing = true;
"""
__generator_code_str = __generator_code_str.replace(" ", "\t").replace("\n", "\n%(s)s")
def generator(self, code):
if self.is_generator:
s = self.spacing()
if self.source_tracking:
src1 = "var $pyjs__trackstack_size_%d = $pyjs.trackstack.length;" % self.stacksize_depth
src2 = """\
%(s)ssys.save_exception_stack();
%(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) {
%(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d);
%(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0];
%(s)s}
%(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name}
else:
src1 = src2 = ""
self.w( self.__generator_code_str % locals())
self.indent()
self.w( code)
self.w( self.spacing() + "return;")
self.w( self.dedent() + "};")
self.w( self.spacing() + "return $generator;")
else:
            self.w( code, False)
def generator_switch_open(self):
if self.is_generator:
self.indent()
def generator_switch_case(self, increment):
if self.is_generator:
if increment:
self.generator_states[-1] += 1
n_states = len(self.generator_states)
state = self.generator_states[-1]
if self.generator_states[-1] == 0:
self.dedent()
self.w( self.indent() + """if (typeof $generator_state[%d] == 'undefined' || $generator_state[%d] === 0) {""" % (n_states-1, n_states-1))
self.generator_clear_state()
if n_states == 1:
self.generator_throw()
else:
if increment:
self.w( self.spacing() + """$generator_state[%d]=%d;""" % (n_states-1, state))
self.w( self.dedent() + "}")
self.w( self.indent() + """if ($generator_state[%d] == %d) {""" % (n_states-1, state))
def generator_switch_close(self):
if self.is_generator:
self.w( self.dedent() + "}")
def generator_add_state(self):
if self.is_generator:
self.generator_states.append(0)
self.state_max_depth = len(self.generator_states)
def generator_del_state(self):
if self.is_generator:
del self.generator_states[-1]
def generator_clear_state(self):
if self.is_generator:
n_states = len(self.generator_states)
self.w( self.spacing() + """for (var $i = %d ; $i < ($generator_state.length<%d?%d:$generator_state.length); $i++) $generator_state[$i]=0;""" % (n_states-1, n_states+1, n_states+1))
def generator_reset_state(self):
if self.is_generator:
n_states = len(self.generator_states)
self.w( self.spacing() + """$generator_state.splice(%d, $generator_state.length-%d);""" % (n_states, n_states))
def generator_throw(self):
self.w( self.indent() + "if (typeof $exc != 'undefined' && $exc !== null) {")
self.w( self.spacing() + "$yielding = null;")
self.w( self.spacing() + "$generator_state[%d] = -1;" % (len(self.generator_states)-1,))
self.w( self.spacing() + "throw $exc;")
self.w( self.dedent() + "}")
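    # func_args attaches the __bind_type__/__args__ metadata used by the
    # runtime for keyword and default-argument handling; each argument is
    # encoded as ['name'] or ['name', default], after the *args name and
    # the **kwargs entry (or null placeholders).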
def func_args(self, node, current_klass, function_name, bind_type, args, stararg, dstararg):
try:
bind_type = BIND_TYPES_NUMERIC[bind_type]
except KeyError:
raise TranslationError("Unknown bind type: %s" % bind_type, node)
_args = []
default_pos = len(args) - len(node.defaults)
for idx, arg in enumerate(args):
if idx < default_pos:
_args.append("['%s']" % arg)
else:
default_value = self.expr(node.defaults[idx-default_pos], current_klass)
_args.append("""['%s', %s]""" % (arg, default_value))
args = ",".join(_args)
if dstararg:
args = "['%s'],%s" % (dstararg, args)
else:
args = "null,%s" % args
if stararg:
args = "'%s',%s" % (stararg, args)
else:
args = "null,%s" % args
args = '[' + args + ']'
# remove any empty tail
if args.endswith(',]'):
args = args[:-2] + ']'
if function_name is None:
self.w( "\t, %d, %s);" % (bind_type, args))
else:
self.w( self.spacing() + "%s.__bind_type__ = %s;" % (function_name, bind_type))
self.w( self.spacing() + "%s.__args__ = %s;" % (function_name, args))
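    # _instance_method_init emits the argument-unpacking prologue for
    # instance methods: one branch for calls on an instance (self = this)
    # and one for unbound calls where self arrives as arguments[0], each
    # with its own arity check when function_argument_checking is on.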
def _instance_method_init(self, node, arg_names, varargname, kwargname,
current_klass, output=None):
output = output or self.output
maxargs1 = len(arg_names) - 1
maxargs2 = len(arg_names)
minargs1 = maxargs1 - len(node.defaults)
minargs2 = maxargs2 - len(node.defaults)
if node.kwargs:
maxargs1 += 1
maxargs2 += 1
maxargs1str = "%d" % maxargs1
maxargs2str = "%d" % maxargs2
if node.varargs:
argcount1 = "arguments.length < %d" % minargs1
maxargs1str = "null"
elif minargs1 == maxargs1:
argcount1 = "arguments.length != %d" % minargs1
else:
argcount1 = "(arguments.length < %d || arguments.length > %d)" % (minargs1, maxargs1)
if node.varargs:
argcount2 = "arguments.length < %d" % minargs2
maxargs2str = "null"
elif minargs2 == maxargs2:
argcount2 = "arguments.length != %d" % minargs2
else:
argcount2 = "(arguments.length < %d || arguments.length > %d)" % (minargs2, maxargs2)
s = self.spacing()
if self.create_locals:
args = ["this", "arguments"]
args.append("%d" % len(node.defaults))
args.append(bool(node.varargs) and "true" or "false")
args.append(bool(node.kwargs) and "true" or "false")
args = ", ".join(args)
self.w(s + "var $l = $pyjs_instance_method_get(%s);" % args)
args = []
if node.varargs:
args.append("%(varargname)s = $l.%(varargname)s" % locals())
if node.kwargs:
args.append("%(kwargname)s = $l.%(kwargname)s" % locals())
args = ", ".join(args)
if args:
self.w( s + "var %s;" % args)
if arg_names:
an = arg_names[0]
self.w( s + "var %s = $l.%s;" % (an, an))
args = []
for an in arg_names[1:]:
args.append("%s = $l.%s" % (an, an))
if args:
args = ", ".join(args)
self.w( s + "%s;" % args)
if False: #arg_names:
an = arg_names[0]
self.w( s + "if (this.__is_instance__ === true) {")
self.w( s + "\tvar %s = this;" % an)
self.w( s + "} else {")
self.w( s + "\t%s = $l.%s;" % (an, an))
self.w( s + "}")
for an in arg_names[1:]:
an = (an, an, an)
self.w(s + "%s = $pyjsdf(%s, $l.%s);" % an)
return
lpself = "var "
lp = ""
self.w(self.indent() + """\
if (this.__is_instance__ === true) {\
""", output=output)
if arg_names:
self.w( self.spacing() + """\
%s%s = this;\
""" % (lpself, arg_names[0]), output=output)
if node.varargs:
self._varargs_handler(node, varargname, maxargs1, lp)
if node.kwargs:
self.w( self.spacing() + """\
%s%s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\
""" % (lpself, kwargname, maxargs1), output=output)
s = self.spacing()
self.w( """\
%(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\
""" % locals(), output=output)
if node.varargs:
self.w( """\
%(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(lp)s%(varargname)s.__array.push(%(lp)s%(kwargname)s);\
""" % locals(), output=output)
self.w( """\
%(s)s\t%(lpself)s%(kwargname)s = arguments[arguments.length+1];
%(s)s} else {
%(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg'];
%(s)s}\
""" % locals(), output=output)
if self.function_argument_checking:
self.w( self.spacing() + """\
if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length+1);\
""" % (argcount1, minargs2, maxargs2str), output=output)
self.w( self.dedent() + """\
} else {\
""", output=output)
self.indent()
if arg_names:
self.w( self.spacing() + """\
%s%s = arguments[0];\
""" % (lpself, arg_names[0]), output=output)
arg_idx = 0
for arg_name in arg_names[1:]:
arg_idx += 1
self.w( self.spacing() + """\
%s%s = arguments[%d];\
""" % (lp, arg_name, arg_idx), output=output)
if node.varargs:
self._varargs_handler(node, varargname, maxargs2, lp)
if node.kwargs:
self.w( self.spacing() + """\
%s%s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\
""" % (lpself, kwargname, maxargs2), output=output)
s = self.spacing()
self.w( """\
%(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\
""" % locals(), output=output)
if node.varargs:
self.w( """\
%(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(lp)s%(varargname)s.__array.push(%(lp)s%(kwargname)s);\
""" % locals(), output=output)
self.w( """\
%(s)s\t%(lp)s%(kwargname)s = arguments[arguments.length+1];
%(s)s} else {
%(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg'];
%(s)s}\
""" % locals(), output=output)
if self.function_argument_checking:
self.w( """\
%sif ($pyjs.options.arg_is_instance && self.__is_instance__ !== true) $pyjs__exception_func_instance_expected(arguments.callee.__name__, arguments.callee.__class__.__name__, self);
%sif ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\
""" % (self.spacing(), self.spacing(), argcount2, minargs2, maxargs2str), output=output)
self.w( self.dedent() + "}", output=output)
if arg_names and self.function_argument_checking:
self.w( """\
%(s)sif ($pyjs.options.arg_instance_type) {
%(s)s\tif (%(self)s.prototype.__md5__ !== '%(__md5__)s') {
%(s)s\t\tif (!@{{_isinstance}}(%(self)s, arguments['callee']['__class__'])) {
%(s)s\t\t\t$pyjs__exception_func_instance_expected(arguments['callee']['__name__'], arguments['callee']['__class__']['__name__'], %(self)s);
%(s)s\t\t}
%(s)s\t}
%(s)s}\
""" % {'s': self.spacing(), 'self': arg_names[0], '__md5__': current_klass.__md5__}, output=output)
def _static_method_init(self, node, arg_names, varargname, kwargname,
current_klass, output=None):
output = output or self.output
maxargs = len(arg_names)
minargs = maxargs - len(node.defaults)
maxargsstr = "%d" % maxargs
s = self.spacing()
if False: # self.create_locals:
lp = "$l."
lpdec = ""
self.w(s + "var $l = {};")
arg_idx = 0
for arg_name in arg_names:
self.w( s + """%s%s = arguments[%d];""" % \
(lp, arg_name, arg_idx), output=output)
arg_idx += 1
else:
lpdec = "var "
lp = ""
if node.kwargs:
maxargs += 1
if node.varargs:
argcount = "arguments.length < %d" % minargs
maxargsstr = "null"
elif minargs == maxargs:
argcount = "arguments.length != %d" % minargs
else:
argcount = "(arguments.length < %d || arguments.length > %d)" % (minargs, maxargs)
if self.function_argument_checking:
self.w( self.spacing() + """\
if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\
""" % (argcount, minargs, maxargsstr), output=output)
if node.varargs:
self._varargs_handler(node, varargname, maxargs, lp)
if node.kwargs:
self.w( self.spacing() + """\
%s%s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\
""" % (lpdec, kwargname, maxargs), output=output)
s = self.spacing()
self.w( """\
%(s)sif (typeof %(lp)s%(kwargname)s != 'object' || %(lp)s%(kwargname)s.__name__ != 'dict' || typeof %(lp)s%(kwargname)s.$pyjs_is_kwarg == 'undefined') {\
""" % locals(), output=output)
if node.varargs:
self.w( """\
%(s)s\tif (typeof %(lp)s%(kwargname)s != 'undefined') %(varargname)s.__array.push(%(lp)s%(kwargname)s);\
""" % locals(), output=output)
self.w( """\
%(s)s\t%(lp)s%(kwargname)s = arguments[arguments.length+1];
%(s)s} else {
%(s)s\tdelete %(lp)s%(kwargname)s['$pyjs_is_kwarg'];
%(s)s}\
""" % locals(), output=output)
def _class_method_init(self, node, arg_names, varargname, kwargname,
current_klass, output=None):
output = output or self.output
maxargs = max(0, len(arg_names) -1)
minargs = max(0, maxargs - len(node.defaults))
maxargsstr = "%d" % (maxargs+1)
if node.kwargs:
maxargs += 1
if node.varargs:
argcount = "arguments.length < %d" % minargs
maxargsstr = "null"
elif minargs == maxargs:
argcount = "arguments.length != %d" % minargs
maxargsstr = "%d" % (maxargs)
else:
argcount = "(arguments.length < %d || arguments.length > %d)" % (minargs, maxargs)
if self.function_argument_checking:
self.w( """\
if ($pyjs.options.arg_is_instance && this.__is_instance__ !== true && this.__is_instance__ !== false) $pyjs__exception_func_class_expected(arguments.callee.__name__, arguments.callee.__class__.__name__);
if ($pyjs.options.arg_count && %s) $pyjs__exception_func_param(arguments.callee.__name__, %d, %s, arguments.length);\
""" % (argcount, minargs+1, maxargsstr), output=output)
self.w( """\
var %s = this.prototype;\
""" % (arg_names[0],), output=output)
if node.varargs:
self._varargs_handler(node, varargname, maxargs, "")
if node.kwargs:
self.w( self.spacing() + """\
var %s = arguments.length >= %d ? arguments[arguments.length-1] : arguments[arguments.length];\
""" % (kwargname, maxargs), output=output)
s = self.spacing()
self.w( """\
%(s)sif (typeof %(kwargname)s != 'object' || %(kwargname)s.__name__ != 'dict' || typeof %(kwargname)s.$pyjs_is_kwarg == 'undefined') {\
""" % locals(), output=output)
if node.varargs:
self.w( """\
%(s)s\tif (typeof %(kwargname)s != 'undefined') %(varargname)s.__array.push(%(kwargname)s);\
""" % locals(), output=output)
self.w( """\
%(s)s\t%(kwargname)s = arguments[arguments.length+1];
%(s)s}\
""" % locals(), output=output)
def _default_args_handler(self, node, arg_names, current_klass, kwargname,
lp, output=None):
output = output or self.output
if node.kwargs:
            # This is necessary when the function definition has **kwargs
            # and the call didn't go through pyjs_kwargs_call().
            # See libtest testKwArgsInherit.
            # This is not completely safe: if the last element in arguments
            # is a dict, the corresponding argument should be a dict, and
            # the kwargs should be empty, then kwargs incorrectly receives
            # the dict and the argument becomes undefined.
# E.g.
# def fn(a = {}, **kwargs): pass
# fn({'a':1}) -> a gets undefined and kwargs gets {'a':1}
revargs = arg_names[0:]
revargs.reverse()
self.w( """\
%(s)sif (typeof %(lp)s%(k)s == 'undefined') {
%(s)s\t%(lp)s%(k)s = @{{__empty_dict}}();\
""" % {'lp': lp, 's': self.spacing(), 'k': kwargname}, output=output)
for v in revargs:
self.w( """\
%(s)s\tif (typeof %(lp)s%(v)s != 'undefined') {
%(s)s\t\tif (%(lp)s%(v)s !== null && typeof %(lp)s%(v)s['$pyjs_is_kwarg'] != 'undefined') {
%(s)s\t\t\t%(lp)s%(k)s = %(lp)s%(v)s;
%(s)s\t\t\t%(lp)s%(v)s = arguments[%(a)d];
%(s)s\t\t}
%(s)s\t} else\
""" % {'lp': lp, 's': self.spacing(), 'v': v, 'k': kwargname, 'a': len(arg_names)}, False, output=output)
self.w( """\
{
%(s)s\t}
%(s)s}\
""" % {'s': self.spacing()}, output=output)
if len(node.defaults):
default_pos = len(arg_names) - len(node.defaults)
for default_node in node.defaults:
#default_value = self.expr(default_node, current_klass)
default_name = arg_names[default_pos]
default_pos += 1
#self.w( self.spacing() + "if (typeof %s == 'undefined') %s=%s;" % (default_name, default_name, default_value))
self.w( self.spacing() + "if (typeof %s%s == 'undefined') %s%s=arguments.callee.__args__[%d][1];" % (lp, default_name, lp, default_name, default_pos+1), output=output)
def _varargs_handler(self, node, varargname, start, lp):
if node.kwargs:
end = "arguments.length-1"
start -= 1
else:
end = "arguments.length"
if not lp:
lp = 'var '
self.w( """\
%(s)s%(lp)s%(v)s = $p['tuple']($pyjs_array_slice.call(arguments,%(b)d,%(e)s));
""" % {'s': self.spacing(), 'v': varargname, 'b': start, 'e': end, 'lp': lp})
def _kwargs_parser(self, node, function_name, arg_names, current_klass, method_ = False):
default_pos = len(arg_names) - len(node.defaults)
if not method_:
self.w( self.indent() + function_name+'.parse_kwargs = function (', ", ".join(["__kwargs"]+arg_names) + " ) {")
else:
self.w( self.indent() + ", function (", ", ".join(["__kwargs"]+arg_names) + " ) {")
self.w( self.spacing() + "var __r = [];")
self.w( self.spacing() + "var $pyjs__va_arg_start = %d;" % (len(arg_names)+1))
if len(arg_names) > 0:
self.w( """\
%(s)sif (typeof %(arg_name)s != 'undefined' && this.__is_instance__ === false && %(arg_name)s.__is_instance__ === true) {
%(s)s\t__r.push(%(arg_name)s);
%(s)s\t$pyjs__va_arg_start++;""" % {'s': self.spacing(), 'arg_name': arg_names[0]})
idx = 1
for arg_name in arg_names:
idx += 1
self.w( """\
%(s)s\t%(arg_name)s = arguments[%(idx)d];\
""" % {'s': self.spacing(), 'arg_name': arg_name, 'idx': idx})
self.w( self.spacing() + "}")
for arg_name in arg_names:
if self.function_argument_checking:
self.w( """\
%(s)sif (typeof %(arg_name)s == 'undefined') {
%(s)s\t%(arg_name)s=__kwargs.%(arg_name)s;
%(s)s\tdelete __kwargs.%(arg_name)s;
%(s)s} else if ($pyjs.options.arg_kwarg_multiple_values && typeof __kwargs.%(arg_name)s != 'undefined') {
%(s)s\t$pyjs__exception_func_multiple_values('%(function_name)s', '%(arg_name)s');
%(s)s}\
""" % {'s': self.spacing(), 'arg_name': arg_name, 'function_name': function_name})
else:
self.w( self.indent() + "if (typeof %s == 'undefined') {"%(arg_name))
self.w( self.spacing() + "%s=__kwargs.%s;"% (arg_name, arg_name))
self.w( self.dedent() + "}")
self.w( self.spacing() + "__r.push(%s);" % arg_name)
if self.function_argument_checking and not node.kwargs:
self.w( """\
%(s)sif ($pyjs.options.arg_kwarg_unexpected_keyword) {
%(s)s\tfor (var i in __kwargs) {
%(s)s\t\t$pyjs__exception_func_unexpected_keyword('%(function_name)s', i);
%(s)s\t}
%(s)s}\
""" % {'s': self.spacing(), 'function_name': function_name})
        # Always add all remaining arguments; needed for argument checking _and_ when self != this.
self.w( """\
%(s)sfor (var $pyjs__va_arg = $pyjs__va_arg_start;$pyjs__va_arg < arguments.length;$pyjs__va_arg++) {
%(s)s\t__r.push(arguments[$pyjs__va_arg]);
%(s)s}
""" % {'s': self.spacing()})
if node.kwargs:
self.w( self.spacing() + "__r.push($p['dict'](__kwargs));")
self.w( self.spacing() + "return __r;")
if not method_:
self.w( self.dedent() + "};")
else:
self.w( self.dedent() + "});")
def _import(self, node, current_klass, root_level = False):
# XXX: hack for in-function checking, we should have another
# object to check our scope
self._doImport(node.names, current_klass, root_level, True)
def _doImport(self, names, current_klass, root_level, assignBase,
absPath=False, all=False):
if root_level:
modtype = 'root-module'
else:
modtype = 'module'
for importName, importAs in names:
if importName == '__pyjamas__':
continue
if importName.endswith(".js"):
self.add_imported_module(importName)
continue
# "searchList" contains a list of possible module names :
# We create the list at compile time to save runtime.
searchList = []
context = self.module_name
if '.' in context:
# our context lives in a package so it is possible to have a
# relative import
package = context.rsplit('.', 1)[0]
relName = package + '.' + importName
searchList.append(relName)
if '.' in importName:
searchList.append(relName.rsplit('.', 1)[0])
# the absolute path
searchList.append(importName)
if '.' in importName:
searchList.append(importName.rsplit('.', 1)[0])
mod = self.lookup(importName)
package_mod = self.lookup(importName.split('.', 1)[0])
if self.source_tracking:
self.w( self.spacing() + "$pyjs.track={module:$pyjs.track.module,lineno:$pyjs.track.lineno};$pyjs.trackstack.push($pyjs.track);")
import_stmt = None
if ( mod[0] != 'root-module'
or (assignBase and not package_mod[0] in ['root-module', 'module'])
):
# the import statement
if absPath:
context = 'null'
else:
context = self.import_context
if not all:
import_stmt = "@{{___import___}}('%s', %s" % (
importName,
context,
)
else:
import_stmt = "@{{__import_all__}}('%s', %s, %s" %(
importName,
context,
self.modpfx()[:-1],
)
if not assignBase:
self.w( self.spacing() + import_stmt + ', null, false);')
self._lhsFromName(importName, current_klass, modtype)
self.add_imported_module(importName)
if assignBase:
# get the name in scope
package_name = importName.split('.')[0]
if importAs:
ass_name = importAs
                    if import_stmt is not None:
import_stmt += ', null, false'
else:
ass_name = package_name
lhs = self._lhsFromName(ass_name, current_klass, modtype)
if importAs:
mod_name = importName
else:
mod_name = ass_name
if import_stmt is None:
#stmt = "%s = $pyjs.__modules__['%s'];"% (lhs, "']['".join(mod_name.split('.')))
parent_mod_name = mod_name.split('.')
if len(parent_mod_name) == 1:
stmt = "%s = $pyjs.loaded_modules['%s'];"% (lhs, mod_name)
else:
mod_name = parent_mod_name[-1]
parent_mod_name = '.'.join(parent_mod_name[:-1])
stmt = "%s = $pyjs.loaded_modules['%s']['%s'];"% (lhs, parent_mod_name, mod_name)
else:
stmt = "%s = %s);"% (lhs, import_stmt)
self.w( self.spacing() + stmt)
if self.source_tracking:
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
def _from(self, node, current_klass, root_level = False):
if node.modname == '__pyjamas__':
# special module to help make pyjamas modules loadable in
# the python interpreter
for name in node.names:
ass_name = name[1] or name[0]
try:
jsname = getattr(__pyjamas__, name[0])
if callable(jsname):
self.add_lookup("__pyjamas__", ass_name, name[0])
else:
self.add_lookup("__pyjamas__", ass_name, jsname)
except AttributeError, e:
#raise TranslationError("Unknown __pyjamas__ import: %s" % name, node)
pass
return
if node.modname == '__javascript__':
for name in node.names:
ass_name = name[1] or name[0]
self.add_lookup("__javascript__", ass_name, name[0])
return
if node.modname == '__future__':
for name in node.names:
future = getattr(__future__, name[0], None)
if callable(future):
future(self)
else:
# Ignoring from __future__ import name[0]
pass
return
# XXX: hack for in-function checking, we should have another
# object to check our scope
absPath = False
modname = node.modname
if hasattr(node, 'level') and node.level > 0:
absPath = True
if self.relative_import_context is not None:
modname = self.relative_import_context.split('.')
level = node.level - 1
if self.relative_import_context is None or len(modname) <= level:
raise TranslationError(
"Attempted relative import beyond toplevel package",
node, self.module_name)
if level:
modname = '.'.join(modname[:-level])
else:
modname = self.relative_import_context
if node.modname:
modname += '.' + node.modname
for name in node.names:
if name[0] == "*":
self._doImport(((modname, name[0]),), current_klass,
root_level, False, absPath, True)
continue
sub = modname + '.' + name[0]
ass_name = name[1] or name[0]
self._doImport(((sub, ass_name),), current_klass, root_level, True, absPath)
def _function(self, node, current_klass, force_local=False):
if self.is_class_definition:
return self._method(node, current_klass)
save_top_level = self.top_level
self.push_options()
save_has_js_return = self.has_js_return
self.has_js_return = False
save_has_yield = self.has_yield
self.has_yield = False
save_is_generator = self.is_generator
self.is_generator = False
save_generator_states = self.generator_states
self.generator_states = [0]
self.state_max_depth = len(self.generator_states)
if not save_top_level or force_local:
function_name = node.name
else:
function_name = self.modpfx() + node.name
function_name = self.add_lookup('function', node.name, function_name)
staticmethod, classmethod, decorator_code = self.parse_decorators(node, node.name, current_klass)
if staticmethod or classmethod:
raise TranslationError(
"Decorators staticmethod and classmethod not implemented for functions",
                node, self.module_name)
self.push_lookup()
arg_names = []
for arg in node.argnames:
if isinstance(arg, tuple):
for a in arg:
arg_names.append(self.add_lookup('variable', a, a))
else:
arg_names.append(self.add_lookup('variable', arg, arg))
normal_arg_names = list(arg_names)
if node.kwargs:
kwargname = normal_arg_names.pop()
else:
kwargname = None
if node.varargs:
varargname = normal_arg_names.pop()
else:
varargname = None
declared_arg_names = list(normal_arg_names)
#if node.kwargs: declared_arg_names.append(kwargname)
function_args = "(" + ", ".join(declared_arg_names) + ")"
self.w( self.indent() + "%s = function%s {" % (function_name, function_args))
self._static_method_init(node, declared_arg_names, varargname, kwargname, None)
#lp = self.create_locals and "$l." or ""
self._default_args_handler(node, declared_arg_names, None, kwargname, "")
local_arg_names = normal_arg_names + declared_arg_names
if node.kwargs:
local_arg_names.append(kwargname)
if node.varargs:
local_arg_names.append(varargname)
self.top_level = False
save_output = self.output
self.output = StringIO()
if self.source_tracking:
self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno))
self.track_lineno(node, True)
for child in node.code:
self._stmt(child, None)
if not self.has_yield and self.source_tracking and self.has_js_return:
self.source_tracking = False
self.output = StringIO()
for child in node.code:
self._stmt(child, None)
elif self.has_yield:
if self.has_js_return:
self.source_tracking = False
self.is_generator = True
self.generator_states = [0]
self.output = StringIO()
self.indent()
if self.source_tracking:
self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno))
self.track_lineno(node, True)
self.generator_switch_open()
self.generator_switch_case(increment=False)
for child in node.code:
self._stmt(child, None)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.dedent()
captured_output = self.output.getvalue()
self.output = save_output
self.w( self.local_js_vars_decl(local_arg_names))
if self.is_generator:
self.generator(captured_output)
else:
self.w( captured_output, False)
# we need to return null always, so it is not undefined
if node.code.nodes:
lastStmt = node.code.nodes[-1]
else:
lastStmt = None
if not isinstance(lastStmt, self.ast.Return):
if self.source_tracking:
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
            # FIXME: check why not on self._isNativeFunc(lastStmt)
if not self._isNativeFunc(lastStmt):
self.w( self.spacing() + "return null;")
self.w( self.dedent() + "};")
self.w( self.spacing() + "%s.__name__ = '%s';\n" % (function_name, node.name))
self.pop_lookup()
self.func_args(node, current_klass, function_name, 'func', declared_arg_names, varargname, kwargname)
if decorator_code:
decorator_code = decorator_code % function_name
if function_name != decorator_code:
self.w( self.spacing() + "%s = %s;" % (function_name, decorator_code))
self.generator_states = save_generator_states
self.state_max_depth = len(self.generator_states)
self.is_generator = save_is_generator
self.has_yield = save_has_yield
self.has_js_return = save_has_js_return
self.pop_options()
self.top_level = save_top_level
def _assert(self, node, current_klass):
expr = self.expr(node.test, current_klass)
if node.fail:
fail = self.expr(node.fail, current_klass)
else:
fail = ''
self.w( self.spacing() + "if (!( " + expr + " )) {")
self.w( self.spacing() + " throw @{{AssertionError}}(%s);" % fail)
self.w( self.spacing() + " }")
def _return(self, node, current_klass):
expr = self.expr(node.value, current_klass)
        # in Python a function always returns at least None, so we mirror
        # that behaviour here
self.track_lineno(node)
if self.is_generator:
if isinstance(node.value, self.ast.Const):
if node.value.value is None:
if self.source_tracking:
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
self.w( self.spacing() + "return;")
return
raise TranslationError(
"'return' with argument inside generator",
node, self.module_name)
elif self.source_tracking:
self.w( self.spacing() + "var $pyjs__ret = " + expr + ";")
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
self.w( self.spacing() + "return $pyjs__ret;")
else:
self.w( self.spacing() + "return " + expr + ";")
def _yield(self, node, current_klass):
# http://www.python.org/doc/2.5.2/ref/yieldexpr.html
self.has_yield = True
expr = self.expr(node.value, current_klass)
self.track_lineno(node)
#self.w( self.spacing() + "$generator_state[%d] = %d;" % (len(self.generator_states)-1, self.generator_states[-1]+1)
self.w( self.spacing() + "$yield_value = " + expr + ";")
if self.source_tracking:
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
self.w( self.spacing() + "$yielding = true;")
self.w( self.spacing() + "$generator_state[%d] = %d;" % (len(self.generator_states)-1, self.generator_states[-1]+1))
self.w( self.spacing() + "return $yield_value;")
self.generator_switch_case(increment=True)
self.generator_throw()
def _yield_expr(self, node, current_klass):
self._yield(node, current_klass)
return '$yield_value'
def _break(self, node, current_klass):
self.generator_switch_case(increment=True)
self.w( self.spacing() + "break;")
def _continue(self, node, current_klass):
self.w( self.spacing() + "continue;")
def _callfunc_code(self, v, current_klass, is_statement=False, optlocal_var=False):
self.ignore_debug = False
method_name = None
if isinstance(v.node, self.ast.Name):
name_type, pyname, jsname, depth, is_local = self.lookup(v.node.name)
if name_type == '__pyjamas__':
try:
raw_js = getattr(__pyjamas__, v.node.name)
if callable(raw_js):
raw_js, has_js_return = raw_js(self, v, current_klass,
is_statement=is_statement)
if has_js_return:
self.has_js_return = True
else:
raw_js = self.translate_escaped_names(raw_js, current_klass)
return raw_js
except AttributeError, e:
raise TranslationError(
"Unknown __pyjamas__ function %s" % pyname,
v.node, self.module_name)
except TranslationError, e:
raise TranslationError(e.msg, v, self.module_name)
elif v.node.name == 'locals':
return """$p.dict({%s})""" % (",".join(["'%s': %s" % (pyname, self.lookup_stack[-1][pyname][2]) for pyname in self.lookup_stack[-1] if self.lookup_stack[-1][pyname][0] not in ['__pyjamas__', 'global']]))
elif v.node.name == 'globals':
# XXX: Should be dictproxy, to handle changes
return "@{{_globals}}(%s)" % self.modpfx()[:-1]
elif v.node.name == 'len' and depth == -1 and len(v.args) == 1:
expr = self.expr(v.args[0], current_klass)
return self.inline_len_code(expr)
else:
if name_type is None:
# What to do with a (yet) unknown name?
# Just nothing...
if optlocal_var:
call_name = '(typeof %s == "undefined"?%s:%s)' % (
v.node.name,
self.scopeName(v.node.name, depth, is_local),
v.node.name,
)
else:
call_name = self.scopeName(v.node.name, depth, is_local)
else:
call_name = jsname
call_args = []
elif isinstance(v.node, self.ast.Getattr):
attrname = self.attrib_remap(v.node.attrname)
if isinstance(v.node.expr, self.ast.Name):
call_name, method_name = self._name2(v.node.expr, current_klass, attrname)
call_args = []
elif isinstance(v.node.expr, self.ast.Getattr):
call_name = self._getattr2(v.node.expr, current_klass, v.node.attrname)
method_name = call_name.pop()
call_name = self.attrib_join(call_name)
call_args = []
elif isinstance(v.node.expr, self.ast.CallFunc):
call_name = self._callfunc(v.node.expr, current_klass)
method_name = attrname
call_args = []
elif isinstance(v.node.expr, self.ast.Subscript):
call_name = self._subscript(v.node.expr, current_klass)
method_name = attrname
call_args = []
elif isinstance(v.node.expr, self.ast.Const):
call_name = self.expr(v.node.expr, current_klass)
method_name = attrname
call_args = []
elif isinstance(v.node.expr, self.ast.Slice):
call_name = self._slice(v.node.expr, current_klass)
method_name = attrname
call_args = []
else:
raise TranslationError(
"unsupported type (in _callfunc)", v.node.expr, self.module_name)
elif isinstance(v.node, self.ast.CallFunc):
call_name = self._callfunc(v.node, current_klass)
call_args = []
elif isinstance(v.node, self.ast.Subscript):
call_name = self._subscript(v.node, current_klass)
call_args = []
else:
raise TranslationError(
"unsupported type (in _callfunc)", v.node, self.module_name)
if method_name in pyjs_attrib_remap:
method_name = pyjs_attrib_remap[method_name]
call_name = strip_py(call_name)
kwargs = []
star_arg_name = None
if v.star_args:
star_arg_name = self.expr(v.star_args, current_klass)
dstar_arg_name = None
if v.dstar_args:
dstar_arg_name = self.expr(v.dstar_args, current_klass)
for ch4 in v.args:
if isinstance(ch4, self.ast.Keyword):
kwarg = self.vars_remap(ch4.name) + ":" + \
self.expr(ch4.expr, current_klass)
kwargs.append(kwarg)
else:
arg = self.expr(ch4, current_klass)
call_args.append(arg)
if kwargs:
fn_args = ", ".join(['{' + ', '.join(kwargs) + '}']+call_args)
else:
fn_args = ", ".join(['{}']+call_args)
if kwargs or star_arg_name or dstar_arg_name:
if not star_arg_name:
star_arg_name = 'null'
if not dstar_arg_name:
dstar_arg_name = 'null'
if method_name is None:
call_code = ("$pyjs_kwargs_call(null, "+call_name+", "
+ star_arg_name
+ ", " + dstar_arg_name
+ ", ["+fn_args+"]"
+ ")")
else:
call_code = ("$pyjs_kwargs_call("+call_name+", '"+method_name+"', "
+ star_arg_name
+ ", " + dstar_arg_name
+ ", ["+fn_args+"]"
+ ")")
else:
            if method_name is not None:
call_name = "%s['%s']" % (call_name, method_name)
call_code = call_name + "(" + ", ".join(call_args) + ")"
return call_code
def _callfunc(self, v, current_klass, is_statement=False, optlocal_var=False):
call_code = self._callfunc_code(
v,
current_klass,
is_statement=is_statement,
optlocal_var=optlocal_var,
)
if not self.ignore_debug:
call_code = self.track_call(call_code, v.lineno)
return call_code
def _print(self, node, current_klass):
if not self.print_statements:
return
call_args = []
for ch4 in node.nodes:
arg = self.expr(ch4, current_klass)
call_args.append(arg)
self.w( self.spacing() + self.track_call("@{{printFunc}}([%s], %d)" % (', '.join(call_args), int(isinstance(node, self.ast.Printnl))), node.lineno) + ';')
def _tryFinally(self, node, current_klass):
body = node.body
if not isinstance(node.body, self.ast.TryExcept):
body = node
try: # python2.N
node.body.final = node.final
except: # lib2to3
node.body.final_ = node.final_
self._tryExcept(body, current_klass)
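    # _tryExcept handles try/except/else/finally.  Inside a generator the
    # handler state must survive re-entry, hence the $generator_exc
    # bookkeeping and the extra switch states opened around each block;
    # the else clause is modelled by throwing the internal TryElse marker.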
def _tryExcept(self, node, current_klass):
save_is_generator = self.is_generator
if self.is_generator:
self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield
self.try_depth += 1
self.stacksize_depth += 1
save_state_max_depth = self.state_max_depth
start_states = len(self.generator_states)
pyjs_try_err = '$pyjs_try_err'
if self.source_tracking:
self.w( self.spacing() + "var $pyjs__trackstack_size_%d = $pyjs.trackstack.length;" % self.stacksize_depth)
self.generator_switch_case(increment=True)
self.w( self.indent() + "try {")
added_try_except_counter = not self.ignore_debug and self.debug
if added_try_except_counter:
self.w( self.spacing() + "try {")
self.indent()
self.w( self.spacing() + "$pyjs.in_try_except += 1;")
if self.is_generator:
self.w( self.spacing() + "if (typeof $generator_exc[%d] != 'undefined' && $generator_exc[%d] !== null) throw $generator_exc[%d];" % (\
self.try_depth, self.try_depth, self.try_depth))
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
if self.is_generator:
self.w( self.spacing() + "$generator_exc[%d] = null;" % (self.try_depth, ))
self.generator_switch_case(increment=True)
for stmt in node.body.nodes:
self._stmt(stmt, current_klass)
self.generator_switch_case(increment=True)
if hasattr(node, 'else_') and node.else_:
self.w( self.spacing() + "throw @{{TryElse}};")
self.generator_switch_case(increment=True)
self.generator_switch_case(increment=True)
self.generator_switch_close()
if added_try_except_counter:
self.w( self.dedent() + "} finally { $pyjs.in_try_except -= 1; }")
self.w( self.dedent() + "} catch(%s) {" % pyjs_try_err)
self.indent()
if self.source_tracking:
self.w( self.spacing() + "$pyjs.__last_exception_stack__ = sys.save_exception_stack($pyjs__trackstack_size_%d - 1);" % self.stacksize_depth)
self.w( self.spacing() + "$pyjs.__active_exception_stack__ = null;")
if self.is_generator:
self.w( self.spacing() + "$generator_exc[%d] = %s;" % (self.try_depth, pyjs_try_err))
try_state_max_depth = self.state_max_depth
self.generator_states += [0 for i in range(save_state_max_depth+1, try_state_max_depth)]
if hasattr(node, 'else_') and node.else_:
self.w( self.indent() + """\
if (%(e)s.__name__ == 'TryElse') {""" % {'e': pyjs_try_err})
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
for stmt in node.else_:
self._stmt(stmt, current_klass)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_del_state()
self.w( self.dedent() + """} else {""")
self.indent()
if self.attribute_checking:
self.w( self.spacing() + """%s = @{{_errorMapping}}(%s);""" % (pyjs_try_err, pyjs_try_err))
self.w( self.spacing() + """\
var %(e)s_name = (typeof %(e)s.__name__ == 'undefined' ? %(e)s.name : %(e)s.__name__ );\
""" % {'e': pyjs_try_err})
self.w( self.spacing() + "$pyjs.__last_exception__ = {error: %s, module: %s};" % (pyjs_try_err, self.module_prefix[:-1]))
if self.source_tracking:
self.w( """\
%(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) {
%(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d);
%(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0];
%(s)s}
%(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name})
pyjs_try_err = self.add_lookup('variable', pyjs_try_err, pyjs_try_err)
if hasattr(node, 'handlers'):
else_str = self.spacing()
if len(node.handlers) == 1 and node.handlers[0][0] is None:
else_str += "if (true) "
for handler in node.handlers:
lineno = handler[2].nodes[0].lineno
expr = handler[0]
as_ = handler[1]
if as_:
errName = as_.name
else:
errName = None
if not expr:
self.w( "%s{" % else_str)
else:
if expr.lineno:
lineno = expr.lineno
l = []
if isinstance(expr, self.ast.Tuple):
for x in expr.nodes:
l.append("((%s_name == %s.__name__)||@{{_isinstance}}(%s,%s))" % (pyjs_try_err,
self.expr(x, current_klass),pyjs_try_err, self.expr(x, current_klass)))
else:
l = [ "(%s_name == %s.__name__)||@{{_isinstance}}(%s,%s)" % (pyjs_try_err,
self.expr(expr, current_klass),pyjs_try_err, self.expr(expr, current_klass)) ]
self.w( "%sif (%s) {" % (else_str, "||".join(l)))
self.indent()
if errName:
tnode = self.ast.Assign([self.ast.AssName(errName, "OP_ASSIGN", lineno)], self.ast.Name(pyjs_try_err, lineno), lineno)
self._assign(tnode, current_klass)
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
for stmt in handler[2]:
self._stmt(stmt, current_klass)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_del_state()
self.w( self.dedent() + "}", False)
else_str = "else "
if node.handlers[-1][0]:
# No default catcher, create one to fall through
self.w( "%s{ $pyjs.__active_exception_stack__ = $pyjs.__last_exception_stack__; $pyjs.__last_exception_stack__ = null; throw %s; }" % (else_str, pyjs_try_err))
else:
self.w(None)
if hasattr(node, 'else_') and node.else_:
self.w( self.dedent() + "}")
final = None
if hasattr(node, 'final'):
final = node.final
if hasattr(node, 'final_'):
final = node.final_
if final is not None:
self.w( self.dedent() + "} finally {")
self.indent()
if self.is_generator:
self.w( self.spacing() + "if ($yielding === true) return $yield_value;")
#self.w( self.spacing() + "if ($yielding === null) throw $exc;")
else_except_state_max_depth = self.state_max_depth
self.generator_states = self.generator_states[:save_state_max_depth]
self.generator_states += [0 for i in range(save_state_max_depth, else_except_state_max_depth)]
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
for stmt in final:
self._stmt(stmt, current_klass)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_states = self.generator_states[:start_states+1]
self.w( self.dedent() + "}")
if self.is_generator:
self.w( self.spacing() + "$generator_exc[%d] = null;" % (self.try_depth, ))
self.generator_clear_state()
self.generator_del_state()
self.try_depth -= 1
self.stacksize_depth -= 1
self.generator_switch_case(increment=True)
self.is_generator = save_is_generator
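    # _getattr returns the attribute path as a list of parts for
    # attrib_join to assemble; with getattr_support enabled it emits a
    # runtime @{{getattr}} call instead.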
def _getattr(self, v, current_klass, use_getattr=None):
if use_getattr is None:
use_getattr = self.getattr_support
attr_name = self.attrib_remap(v.attrname)
if use_getattr:
expr = self.expr(v.expr, current_klass)
return ["@{{getattr}}(%s, '%s')" % (expr, attr_name)]
if isinstance(v.expr, self.ast.Name):
obj = self._name(v.expr, current_klass, return_none_for_module=True)
if not use_getattr or attr_name == '__class__' or \
attr_name == '__name__':
return [obj, attr_name]
return ["@{{getattr}}(%s, '%s')" % (obj, attr_name)]
elif isinstance(v.expr, self.ast.Getattr):
return self._getattr(v.expr, current_klass) + [attr_name]
elif isinstance(v.expr, self.ast.Subscript):
return [self._subscript(v.expr, self.modpfx()), attr_name]
elif isinstance(v.expr, self.ast.CallFunc):
return [self._callfunc(v.expr, self.modpfx()), attr_name]
elif isinstance(v.expr, self.ast.Const):
return [self._const(v.expr), attr_name]
elif isinstance(v.expr, self.ast.List):
return [self._list(v.expr, current_klass), attr_name]
elif isinstance(v.expr, self.ast.Dict):
return [self._dict(v.expr, current_klass), attr_name]
elif isinstance(v.expr, self.ast.Tuple):
return [self._tuple(v.expr, current_klass), attr_name]
elif isinstance(v.expr, self.ast.Lambda):
return [self._lambda(v.expr, current_klass), attr_name]
elif isinstance(v.expr, self.ast.Slice):
return [self._slice(v.expr, current_klass), attr_name]
else:
raise TranslationError(
"unsupported type (in _getattr)", v.expr, self.module_name)
def modpfx(self):
return strip_py(self.module_prefix)
def _name(self, v, current_klass,
return_none_for_module=False,
optlocal_var=False,
):
if not hasattr(v, 'name'):
name = v.attrname
else:
name = v.name
name_type, pyname, jsname, depth, is_local = self.lookup(name)
if name_type is None:
# What to do with a (yet) unknown name?
# Just nothing...
if not optlocal_var:
return self.scopeName(name, depth, is_local)
return '(typeof %s == "undefined"?%s:%s)' % (
name,
self.scopeName(name, depth, is_local),
name,
)
return jsname
def _name2(self, v, current_klass, attr_name):
name_type, pyname, jsname, depth, is_local = self.lookup(v.name)
if name_type is None:
jsname = self.scopeName(v.name, depth, is_local)
return jsname, attr_name
def _getattr2(self, v, current_klass, attr_name):
if isinstance(v.expr, self.ast.Getattr):
return self._getattr2(v.expr, current_klass, v.attrname) + [attr_name]
if isinstance(v.expr, self.ast.Name):
name_type, pyname, jsname, depth, is_local = self.lookup(v.expr.name)
if name_type is None:
jsname = self.scopeName(v.expr.name, depth, is_local)
return [jsname, v.attrname, attr_name]
return [self.expr(v.expr, current_klass), v.attrname, attr_name]
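    # _class wraps the class body in an immediately-invoked function:
    # members collect on the local $cls_definition object, which is then
    # handed to $pyjs_type (for pyjslib itself) or @{{_create_class}}
    # together with the evaluated base classes.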
def _class(self, node, parent_class = None):
save_top_level = self.top_level
if parent_class is None:
class_name = self.modpfx() + node.name
else:
class_name = node.name
self.top_level = False
local_prefix = '$cls_definition'
name_scope = {}
current_klass = Klass(class_name, name_scope)
if self.function_argument_checking or self.module_name == 'pyjslib':
current_klass.__md5__ = self.md5(node)
if len(node.bases) == 0:
base_classes = [("object", "pyjslib.object")]
else:
base_classes = []
for node_base in node.bases:
if isinstance(node_base, self.ast.Name):
node_base_name = node_base.name
base_class = self._name(node_base, None)
elif isinstance(node_base, self.ast.Getattr):
# the bases are not in scope of the class so do not
# pass our class to self._name
node_base_name = node_base.attrname
base_class = self.expr(node_base, None)
else:
raise TranslationError(
"unsupported type (in _class)",
node_base, self.module_name)
base_classes.append((node_base_name, base_class))
current_klass.set_base(base_classes[0][1])
if node.name in ['object', 'pyjslib.Object', 'pyjslib.object']:
base_classes = []
class_name = self.add_lookup('class', node.name, class_name)
self.w( self.indent() + class_name + """ = (function(){
%(s)svar %(p)s = new Object();
%(s)svar $method;
%(s)s%(p)s.__module__ = '%(module)s';""" % {'s': self.spacing(), 'p': local_prefix, 'module': self.module_name})
if self.function_argument_checking or self.module_name == 'pyjslib':
self.w( self.spacing() + "%(p)s.__md5__ = '%(m)s';" % {'p': local_prefix, 'm': current_klass.__md5__})
self.push_lookup(name_scope)
for child in node.code:
self.is_class_definition = True
self.local_prefix = local_prefix
self._stmt(child, current_klass)
self.track_lineno(node, False)
create_class = """\
%(s)svar $bases = new Array(%(bases)s);"""
if self.module_name == 'pyjslib':
create_class += """
%(s)sreturn $pyjs_type('%(n)s', $bases, %(local_prefix)s);"""
else:
create_class += """
%(s)svar $data = $p['dict']();
%(s)sfor (var $item in %(local_prefix)s) { $data.__setitem__($item, %(local_prefix)s[$item]); }
%(s)sreturn @{{_create_class}}('%(n)s', $p['tuple']($bases), $data);"""
create_class %= {'n': node.name, 's': self.spacing(), 'local_prefix': local_prefix, 'bases': ",".join(map(lambda x: x[1], base_classes))}
create_class += """
%s})();""" % self.dedent()
self.w( create_class)
self.pop_lookup()
self.is_class_definition = None
self.local_prefix = None
self.top_level = save_top_level
def classattr(self, node, current_klass):
self._assign(node, current_klass)
def _raise(self, node, current_klass):
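        # Three raise forms are handled: bare `raise` re-throws the active
        # exception, `raise expr` throws the expression directly, and the
        # two/three argument forms `raise type, value[, traceback]` call the
        # exception type with the given value(s), after checking that an
        # instance is not combined with a separate value.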
if self.is_generator:
self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1]+1))
if node.expr1:
if self.source_tracking:
self.w( self.spacing() + "$pyjs.__active_exception_stack__ = null;")
if node.expr2:
if node.expr3:
self.w( """
%(s)svar $pyjs__raise_expr1 = %(expr1)s;
%(s)svar $pyjs__raise_expr2 = %(expr2)s;
%(s)svar $pyjs__raise_expr3 = %(expr3)s;
%(s)sif ($pyjs__raise_expr2 !== null && $pyjs__raise_expr1.__is_instance__ === true) {
%(s)s\tthrow @{{TypeError}}('instance exception may not have a separate value');
%(s)s}
%(s)s\tthrow ($pyjs__raise_expr1.apply($pyjs__raise_expr1, $pyjs__raise_expr2, $pyjs__raise_expr3));
""" % { 's': self.spacing(),
'expr1': self.expr(node.expr1, current_klass),
'expr2': self.expr(node.expr2, current_klass),
'expr3': self.expr(node.expr3, current_klass),
})
else:
self.w( """
%(s)svar $pyjs__raise_expr1 = %(expr1)s;
%(s)svar $pyjs__raise_expr2 = %(expr2)s;
%(s)sif ($pyjs__raise_expr2 !== null && $pyjs__raise_expr1.__is_instance__ === true) {
%(s)s\tthrow @{{TypeError}}('instance exception may not have a separate value');
%(s)s}
%(s)sif (@{{isinstance}}($pyjs__raise_expr2, $p['tuple'])) {
%(s)s\tthrow ($pyjs__raise_expr1.apply($pyjs__raise_expr1, $pyjs__raise_expr2.getArray()));
%(s)s} else {
%(s)s\tthrow ($pyjs__raise_expr1($pyjs__raise_expr2));
%(s)s}
""" % { 's': self.spacing(),
'expr1': self.expr(node.expr1, current_klass),
'expr2': self.expr(node.expr2, current_klass),
})
else:
self.w( self.spacing() + "throw (%s);" % self.expr(
node.expr1, current_klass))
else:
if self.source_tracking:
self.w( self.spacing() + "$pyjs.__active_exception_stack__ = $pyjs.__last_exception_stack__;")
self.w( self.spacing() + "$pyjs.__last_exception_stack__ = null;")
s = self.spacing()
self.w( """\
%(s)sthrow ($pyjs.__last_exception__?
%(s)s\t$pyjs.__last_exception__.error:
%(s)s\t@{{TypeError}}('exceptions must be classes, instances, or strings (deprecated), not NoneType'));\
""" % locals())
self.generator_switch_case(increment=True)
def _method(self, node, current_klass):
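        # Translate one method of the class being defined. Translator state
        # (generator flags, option stack, lookup tables, output buffer) is
        # saved and restored around the body; decorators select bound/static/
        # class binding, the body is rendered into a temporary buffer so the
        # local variable declarations can be emitted first, and bodies that
        # yield are rewritten into a state machine via self.generator().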
save_top_level = self.top_level
self.push_options()
save_has_js_return = self.has_js_return
self.has_js_return = False
save_has_yield = self.has_yield
self.has_yield = False
save_is_generator = self.is_generator
self.is_generator = False
save_generator_states = self.generator_states
self.generator_states = [0]
self.state_max_depth = len(self.generator_states)
save_local_prefix = self.local_prefix
method_name = self.attrib_remap(node.name)
jsmethod_name = self.add_lookup('method', method_name, method_name)
self.local_prefix = None
self.is_class_definition = None
staticmethod, classmethod, decorator_code = self.parse_decorators(node, method_name, current_klass)
if node.name == '__new__':
staticmethod = True
self.pop_lookup()
self.push_lookup()
arg_names = []
for arg in node.argnames:
if isinstance(arg, tuple):
for a in arg:
arg_names.append(self.add_lookup('variable', a, a))
else:
arg_names.append(self.add_lookup('variable', arg, arg))
normal_arg_names = arg_names[0:]
if node.kwargs:
kwargname = normal_arg_names.pop()
else:
kwargname = None
if node.varargs:
varargname = normal_arg_names.pop()
else:
varargname = None
declared_arg_names = list(normal_arg_names)
#if node.kwargs: declared_arg_names.append(kwargname)
if staticmethod:
function_args = "(" + ", ".join(declared_arg_names) + ")"
else:
function_args = "(" + ", ".join(declared_arg_names[1:]) + ")"
self.w( self.indent() + "$method = $pyjs__bind_method2('"+method_name+"', function" + function_args + " {")
defaults_done_by_inline = False
if staticmethod:
self._static_method_init(node, declared_arg_names, varargname, kwargname, current_klass)
elif classmethod:
self._class_method_init(node, declared_arg_names, varargname, kwargname, current_klass)
else:
if self.create_locals:
defaults_done_by_inline = True
self._instance_method_init(node, declared_arg_names, varargname, kwargname, current_klass)
# default arguments
if not defaults_done_by_inline:
self._default_args_handler(node, declared_arg_names, current_klass, kwargname, "")
local_arg_names = normal_arg_names + declared_arg_names
if node.kwargs:
local_arg_names.append(kwargname)
if node.varargs:
local_arg_names.append(varargname)
self.top_level = False
save_output = self.output
self.output = StringIO()
if self.source_tracking:
self.w( self.spacing() + "$pyjs.track={module:'%s', lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno))
self.track_lineno(node, True)
for child in node.code:
self._stmt(child, current_klass)
if not self.has_yield and self.source_tracking and self.has_js_return:
self.source_tracking = False
self.output = StringIO()
for child in node.code:
self._stmt(child, None)
elif self.has_yield:
if self.has_js_return:
self.source_tracking = False
self.is_generator = True
self.generator_states = [0]
self.output = StringIO()
self.indent()
if self.source_tracking:
self.w( self.spacing() + "$pyjs.track={module:'%s',lineno:%d};$pyjs.trackstack.push($pyjs.track);" % (self.module_name, node.lineno))
self.track_lineno(node, True)
self.generator_switch_open()
self.generator_switch_case(increment=False)
for child in node.code:
self._stmt(child, None)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.dedent()
captured_output = self.output.getvalue()
self.output = save_output
self.w( self.local_js_vars_decl(local_arg_names))
if self.is_generator:
self.generator(captured_output)
else:
self.w( captured_output, False)
        # we always need to return null here so the call result is not undefined
if node.code.nodes:
lastStmt = node.code.nodes[-1]
else:
lastStmt = None
if not isinstance(lastStmt, self.ast.Return):
if self.source_tracking:
self.w( self.spacing() + "$pyjs.trackstack.pop();$pyjs.track=$pyjs.trackstack.pop();$pyjs.trackstack.push($pyjs.track);")
if not self._isNativeFunc(lastStmt):
self.w( self.spacing() + "return null;")
self.w( self.dedent() + "}")
bind_type = 'bound'
if staticmethod:
bind_type = 'static'
elif classmethod:
bind_type = 'class'
self.pop_lookup()
self.func_args(node, current_klass, None, bind_type, declared_arg_names, varargname, kwargname)
self.generator_states = save_generator_states
self.state_max_depth = len(self.generator_states)
self.is_generator = save_is_generator
self.has_yield = save_has_yield
self.has_js_return = save_has_js_return
self.pop_options()
self.push_lookup(current_klass.name_scope)
staticmethod, classmethod, decorator_code = self.parse_decorators(node, node.name, current_klass,
True, bind_type)
decorator_code = decorator_code % '$method'
self.w( self.spacing() + "%s = %s;" % (jsmethod_name, decorator_code))
self.add_lookup('method', node.name, "@{{staticmethod}}(%s)" % jsmethod_name)
self.local_prefix = save_local_prefix
self.is_class_definition = True
self.top_level = save_top_level
def _isNativeFunc(self, node):
if isinstance(node, self.ast.Discard):
if isinstance(node.expr, self.ast.CallFunc):
if isinstance(node.expr.node, self.ast.Name):
name_type, pyname, jsname, depth, is_local = self.lookup(node.expr.node.name)
if name_type == '__pyjamas__' and jsname in __pyjamas__.native_js_funcs:
return True
return False
def _exec(self, node, current_klass):
pass
def _stmt(self, node, current_klass):
self.track_lineno(node)
if isinstance(node, self.ast.Return):
self._return(node, current_klass)
elif isinstance(node, self.ast.Yield):
self._yield(node, current_klass)
elif isinstance(node, self.ast.Break):
self._break(node, current_klass)
elif isinstance(node, self.ast.Continue):
self._continue(node, current_klass)
elif isinstance(node, self.ast.Assign):
self._assign(node, current_klass)
elif isinstance(node, self.ast.AugAssign):
self._augassign(node, current_klass)
elif isinstance(node, self.ast.Discard):
self._discard(node, current_klass)
elif isinstance(node, self.ast.If):
self._if(node, current_klass)
elif isinstance(node, self.ast.For):
self._for(node, current_klass)
elif isinstance(node, self.ast.While):
self._while(node, current_klass)
elif isinstance(node, self.ast.Subscript):
self._subscript_stmt(node, current_klass)
elif isinstance(node, self.ast.Global):
self._global(node, current_klass)
elif isinstance(node, self.ast.Pass):
pass
elif isinstance(node, self.ast.Function):
self._function(node, current_klass)
elif isinstance(node, self.ast.Printnl):
self._print(node, current_klass)
elif isinstance(node, self.ast.Print):
self._print(node, current_klass)
elif isinstance(node, self.ast.TryExcept):
self._tryExcept(node, current_klass)
elif isinstance(node, self.ast.TryFinally):
self._tryFinally(node, current_klass)
elif isinstance(node, self.ast.Raise):
self._raise(node, current_klass)
elif isinstance(node, self.ast.Import):
self._import(node, current_klass)
elif isinstance(node, self.ast.From):
self._from(node, current_klass)
elif isinstance(node, self.ast.AssAttr):
self._assattr(node, current_klass)
elif isinstance(node, self.ast.Exec):
self._exec(node, current_klass)
elif isinstance(node, self.ast.Assert):
self._assert(node, current_klass)
elif isinstance(node, self.ast.Class):
self._class(node, current_klass)
#elif isinstance(node, self.ast.CallFunc):
# self._callfunc(node, current_klass)
elif isinstance(node, self.ast.Slice):
self.w( self.spacing() + self._slice(node, current_klass))
elif isinstance(node, self.ast.AssName):
# TODO: support other OP_xxx types and move this to
# a separate function
if node.flags == "OP_DELETE":
name = self._lhsFromName(node.name, current_klass)
self.w( self.spacing() + "@{{_del}}(%s);" % name)
else:
raise TranslationError(
"unsupported AssName type (in _stmt)", node, self.module_name)
elif isinstance(node, self.ast.AssTuple):
for node in node.nodes:
self._stmt(node, current_klass)
else:
raise TranslationError(
"unsupported type (in _stmt)", node, self.module_name)
def get_start_line(self, node, lineno):
if node:
if hasattr(node, "lineno") and node.lineno != None and node.lineno < lineno:
lineno = node.lineno
if hasattr(node, 'getChildren'):
for n in node.getChildren():
lineno = self.get_start_line(n, lineno)
return lineno
def get_line_trace(self, node):
lineNum1 = "Unknown"
srcLine = ""
if hasattr(node, "lineno"):
if node.lineno != None:
lineNum2 = node.lineno
lineNum1 = self.get_start_line(node, lineNum2)
srcLine = self.src[min(lineNum1, len(self.src))-1].strip()
if lineNum1 < lineNum2:
srcLine += ' ... ' + self.src[min(lineNum2, len(self.src))-1].strip()
srcLine = srcLine.replace('\\', '\\\\')
srcLine = srcLine.replace('"', '\\"')
srcLine = srcLine.replace("'", "\\'")
return self.module_name + ".py, line " \
+ str(lineNum1) + ":"\
+ "\\n" \
+ " " + srcLine
def _augassign(self, node, current_klass):
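        # Augmented assignment: with operator functions enabled, `a OP= b` is
        # rewritten into an Assign AST node for `a = a OP b` and re-dispatched
        # through self._assign (illustratively, `x += 1` becomes the
        # assignment `x = x + 1`); otherwise the JavaScript `OP=` operator is
        # emitted directly. Complex subscript targets first have their object
        # and index captured in temporaries so each is evaluated only once.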
def astOP(op):
if op == "+=":
return self.ast.Add
if op == "-=":
return self.ast.Sub
if op == "*=":
return self.ast.Mul
if op == "/=":
return self.ast.Div
if op == "%=":
return self.ast.Mod
if op == "//=":
return self.ast.FloorDiv
if op == "**=":
return self.ast.Power
if self.number_classes:
if op == "&=":
return self.ast.Bitand
if op == "^=":
return self.ast.Bitxor
if op == "|=":
return self.ast.Bitor
if op == ">>=":
return self.ast.RightShift
if op == "<<=":
return self.ast.LeftShift
raise TranslationError(
"unsupported OP (in _augassign)", node, self.module_name)
v = node.node
if isinstance(v, self.ast.Getattr):
# XXX HACK! don't allow += on return result of getattr.
# TODO: create a temporary variable or something.
lhs = self.attrib_join(self._getattr(v, current_klass, False))
lhs_ass = self.ast.AssAttr(v.expr, v.attrname, "OP_ASSIGN", node.lineno)
elif isinstance(v, self.ast.Name):
lhs = self._name(v, current_klass)
lhs_ass = self.ast.AssName(v.name, "OP_ASSIGN", node.lineno)
elif isinstance(v, self.ast.Subscript) or self.operator_funcs:
if len(v.subs) != 1:
raise TranslationError(
"must have one sub (in _assign)", v, self.module_name)
lhs = self.ast.Subscript(v.expr, "OP_ASSIGN", v.subs)
expr = v.expr
subs = v.subs
if not (isinstance(v.subs[0], self.ast.Const) or \
isinstance(v.subs[0], self.ast.Name)) or \
not isinstance(v.expr, self.ast.Name):
                # There's something complex here:
                # neither a simple x[0] += ?
                # nor a simple x[y] += ?
augexpr = self.uniqid('$augexpr')
augsub = self.uniqid('$augsub')
self.w( self.spacing() + "var " + augsub + " = " + self.expr(subs[0], current_klass) + ";")
self.add_lookup('variable', augexpr, augexpr)
self.w( self.spacing() + "var " + augexpr + " = " + self.expr(expr, current_klass) + ";")
self.add_lookup('variable', augsub, augsub)
lhs = self.ast.Subscript(self.ast.Name(augexpr), "OP_ASSIGN", [self.ast.Name(augsub)])
v = self.ast.Subscript(self.ast.Name(augexpr), v.flags, [self.ast.Name(augsub)])
op = astOP(node.op)
try: # python2.N
tnode = self.ast.Assign([lhs], op((v, node.expr)))
except: # lib2to3
tnode = self.ast.Assign([lhs], op(v, node.expr))
return self._assign(tnode, current_klass)
else:
raise TranslationError(
"unsupported type (in _augassign)", v, self.module_name)
try:
op_ass = astOP(node.op)
except:
op_ass = None
if not self.operator_funcs or op_ass is None:
op = node.op
rhs = self.expr(node.expr, current_klass)
self.w( self.spacing() + lhs + " " + op + " " + rhs + ";")
return
if isinstance(v, self.ast.Name):
self.add_lookup('global', v.name, lhs)
op = astOP(node.op)
try: # python2.N
tnode = self.ast.Assign([lhs_ass], op((v, node.expr)))
except: # lib2to3
tnode = self.ast.Assign([lhs_ass], op(v, node.expr))
return self._assign(tnode, current_klass)
def _lhsFromName(self, name, current_klass, set_name_type = 'variable'):
name_type, pyname, jsname, depth, is_local = self.lookup(name)
if is_local:
lhs = jsname
self.add_lookup(set_name_type, name, jsname)
elif self.top_level:
if current_klass:
lhs = current_klass.name + "." + name
else:
vname = self.modpfx() + name
vname = self.add_lookup(set_name_type, name, vname)
#lhs = "var " + name + " = " + vname
lhs = vname
else:
vname = self.add_lookup(set_name_type, name, name)
if self.create_locals:
# hmmm...
name_type, pyname, jsname, depth, is_local = self.lookup(name)
if is_local:
lhs = jsname
self.add_lookup(set_name_type, name, jsname)
else:
lhs = vname
else:
lhs = vname
return lhs
def _lhsFromAttr(self, v, current_klass):
if isinstance(v.expr, self.ast.Name):
lhs = self._name(v.expr, current_klass)
elif isinstance(v.expr, self.ast.Getattr):
lhs = self.attrib_join(self._getattr(v, current_klass, False)[:-1])
elif isinstance(v.expr, self.ast.Subscript):
lhs = self._subscript(v.expr, current_klass)
elif isinstance(v.expr, self.ast.CallFunc):
lhs = self._callfunc(v.expr, current_klass)
else:
raise TranslationError(
"unsupported type (in _assign)", v.expr, self.module_name)
return lhs
def _assign(self, node, current_klass):
if len(node.nodes) != 1:
tempvar = self.uniqid("$assign")
tnode = self.ast.Assign([self.ast.AssName(tempvar, "OP_ASSIGN", node.lineno)], node.expr, node.lineno)
self._assign(tnode, current_klass)
for v in node.nodes:
tnode2 = self.ast.Assign([v], self.ast.Name(tempvar, node.lineno), node.lineno)
self._assign(tnode2, current_klass)
return
dbg = 0
v = node.nodes[0]
if isinstance(v, self.ast.AssAttr):
attr_name = self.attrib_remap(v.attrname)
rhs = self.expr(node.expr, current_klass)
lhs = self._lhsFromAttr(v, current_klass)
if v.flags == "OP_ASSIGN":
op = "="
else:
raise TranslationError(
"unsupported flag (in _assign)", v, self.module_name)
if self.getattr_support and not self.descriptors:
# getattr support implies the use of setattr
code = "@{{setattr}}(%(l)s, '%(a)s', %(r)s);"
self.w( self.spacing() + code % {'l': lhs, 'a': attr_name, 'r': rhs})
return
if self.descriptors:
desc_setattr = [
"%(l)s.__is_instance__ &&",
"typeof %(l)s.__setattr__ == 'function' ?",
"%(l)s.__setattr__('%(a)s', %(r)s) :",
"@{{setattr}}(%(l)s, '%(a)s', %(r)s);",
]
self.w( self.spacing() + ' '.join(desc_setattr) % {'l': lhs, 'a': attr_name, 'r': rhs})
return
lhs += '.' + attr_name
elif isinstance(v, self.ast.AssName):
rhs = self.expr(node.expr, current_klass)
lhs = self._lhsFromName(v.name, current_klass)
if v.flags == "OP_ASSIGN":
op = "="
else:
raise TranslationError(
"unsupported flag (in _assign)", v, self.module_name)
elif isinstance(v, self.ast.Subscript):
if v.flags == "OP_ASSIGN":
obj = self.expr(v.expr, current_klass)
if len(v.subs) != 1:
raise TranslationError(
"must have one sub (in _assign)", v, self.module_name)
idx = self.expr(v.subs[0], current_klass)
value = self.expr(node.expr, current_klass)
self.w( self.spacing() + self.track_call(obj + ".__setitem__(" + idx + ", " + value + ")", v.lineno) + ';')
return
else:
raise TranslationError(
"unsupported flag (in _assign)", v, self.module_name)
elif isinstance(v, self.ast.Slice):
if v.flags == "OP_ASSIGN":
if not v.lower:
lower = 0
else:
lower = self.expr(v.lower, current_klass)
if not v.upper:
upper = 'null'
else:
upper = self.expr(v.upper, current_klass)
obj = self.expr(v.expr, current_klass)
value = self.expr(node.expr, current_klass)
self.w( self.spacing() + self.track_call("@{{__setslice}}(%s, %s, %s, %s)" % (obj, lower, upper, value), v.lineno) + ';')
return
else:
raise TranslationError(
"unsupported flag (in _assign)", v, self.module_name)
elif isinstance(v, (self.ast.AssList, self.ast.AssTuple)):
tempName = self.uniqid("$tupleassign")
self.w( self.spacing() + "var " + tempName + " = " + \
self.expr(node.expr, current_klass) + ";")
for index,child in enumerate(v.getChildNodes()):
rhs = self.track_call(tempName + ".__getitem__(" + str(index) + ")", v.lineno)
if isinstance(child, self.ast.AssAttr):
lhs = self._lhsFromAttr(child, current_klass) + '.' + self.attrib_remap(child.attrname)
elif isinstance(child, self.ast.AssName):
lhs = self._lhsFromName(child.name, current_klass)
elif isinstance(child, self.ast.Subscript):
if child.flags == "OP_ASSIGN":
obj = self.expr(child.expr, current_klass)
if len(child.subs) != 1:
raise TranslationError("must have one sub " +
"(in _assign)",
child,
self.module_name)
idx = self.expr(child.subs[0], current_klass)
value = self.expr(node.expr, current_klass)
self.w( self.spacing() + self.track_call(obj + ".__setitem__(" \
+ idx + ", " + rhs + ")", v.lineno) + ';')
continue
elif isinstance(child, self.ast.Slice):
if child.flags == "OP_ASSIGN":
if not child.lower:
lower = 0
else:
lower = self.expr(child.lower, current_klass)
if not child.upper:
upper = 'null'
else:
upper = self.expr(child.upper, current_klass)
obj = self.expr(child.expr, current_klass)
self.w( self.spacing()
+ self.track_call("@{{__setslice}}"
"(%s, %s, %s, %s)"
% (obj, lower, upper, rhs)
, v.lineno) + ';')
continue
else:
raise TranslationError(
"unsupported flag (in _assign)", v, self.module_name)
else:
raise TranslationError(
"unsupported type in assignment list",
v, self.module_name)
self.w( self.spacing() + lhs + " = " + rhs + ";")
return
else:
raise TranslationError(
"unsupported type (in _assign)", v, self.module_name)
if dbg:
print "b", repr(node.expr), rhs
self.w( self.spacing() + lhs + " " + op + " " + rhs + ";")
def _discard(self, node, current_klass):
if isinstance(node.expr, self.ast.CallFunc):
expr = self._callfunc(
node.expr,
current_klass,
is_statement=True,
optlocal_var=isinstance(node.expr.node, self.ast.Name),
)
if isinstance(node.expr.node, self.ast.Name):
name_type, pyname, jsname, depth, is_local = self.lookup(node.expr.node.name)
if name_type == '__pyjamas__' and \
jsname in __pyjamas__.native_js_funcs:
self.w( expr)
return
self.w( self.spacing() + expr + ";")
elif isinstance(node.expr, self.ast.Const):
            # we can safely remove any constant that is discarded,
            # e.g. None for empty expressions after an unneeded ";" and,
            # most importantly, doc strings
if node.expr.value in ["@CONSTANT_DECLARATION@", "@ATTRIB_REMAP_DECLARATION@"]:
self.w( node.expr.value)
return
elif isinstance(node.expr, self.ast.Yield):
self._yield(node.expr, current_klass)
else:
raise TranslationError(
"unsupported type, must be call or const (in _discard)", node.expr, self.module_name)
def _if(self, node, current_klass):
save_is_generator = self.is_generator
if self.is_generator:
self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield
if self.is_generator:
self.w( self.spacing() + "$generator_state[%d] = 0;" % (len(self.generator_states)+1,))
self.generator_switch_case(increment=True)
self.generator_add_state()
for i in range(len(node.tests)):
test, consequence = node.tests[i]
if i == 0:
keyword = "if"
else:
keyword = "else if"
self.lookup_stack[-1]
self._if_test(keyword, test, consequence, node, current_klass)
if node.else_:
keyword = "else"
test = None
consequence = node.else_
self._if_test(keyword, test, consequence, node, current_klass)
if self.is_generator:
self.w( self.spacing() + "$generator_state[%d]=0;" % (len(self.generator_states)-1,))
self.generator_del_state()
self.is_generator = save_is_generator
def _if_test(self, keyword, test, consequence, node, current_klass):
if test:
expr = self.expr(test, current_klass)
if not self.is_generator:
self.w( self.indent() +keyword + " (" + self.track_call(self.inline_bool_code(expr), test.lineno)+") {")
else:
self.generator_states[-1] += 1
self.w( self.indent() +keyword + "(($generator_state[%d]==%d)||($generator_state[%d]<%d&&(" % (\
len(self.generator_states)-1, self.generator_states[-1], len(self.generator_states)-1, self.generator_states[-1],) + \
self.track_call(self.inline_bool_code(expr), test.lineno)+"))) {")
self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1]))
else:
if not self.is_generator:
self.w( self.indent() + keyword + " {")
else:
self.generator_states[-1] += 1
self.w( self.indent() + keyword + " if ($generator_state[%d]==0||$generator_state[%d]==%d) {" % (\
len(self.generator_states)-1, len(self.generator_states)-1, self.generator_states[-1], ))
self.w( self.spacing() + "$generator_state[%d]=%d;" % (len(self.generator_states)-1, self.generator_states[-1]))
if self.is_generator:
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
if isinstance(consequence, self.ast.Stmt):
for child in consequence.nodes:
self._stmt(child, current_klass)
else:
raise TranslationError(
"unsupported type (in _if_test)", consequence, self.module_name)
if self.is_generator:
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_del_state()
self.w( self.dedent() + "}")
def _compare(self, node, current_klass):
lhs = self.expr(node.expr, current_klass)
if len(node.ops) != 1:
cmp = []
for op, rhs_node in node.ops:
rhsname = self.uniqid("$compare")
rhs = self.expr(rhs_node, current_klass)
rhs = "(%s = %s)" % (rhsname, rhs)
cmp.append(self.compare_code(op, lhs, rhs))
lhs = rhsname
return "(%s)" % "&&".join(cmp)
op = node.ops[0][0]
rhs_node = node.ops[0][1]
rhs = self.expr(rhs_node, current_klass)
return self.compare_code(op, lhs, rhs)
def compare_code(self, op, lhs, rhs):
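        # Map a Python comparison onto the runtime helpers; illustratively:
        #   a == b  ->  <inline eq>(a, b)
        #   a < b   ->  (<inline cmp>(a, b) == -1)
        #   a in b  ->  b.__contains__(a)
        #   a is b  ->  (a === b), or op_is(a, b) with number_classes
        # In stupid_mode the plain JavaScript operator is emitted instead.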
if op == "==":
if not self.stupid_mode:
return self.inline_eq_code(lhs, rhs)
if op == "!=":
if not self.stupid_mode:
return "!"+self.inline_eq_code(lhs, rhs)
if op == "<":
if not self.stupid_mode:
return "(%s == -1)" % self.inline_cmp_code(lhs, rhs)
if op == "<=":
if not self.stupid_mode:
return "(%s < 1)" % self.inline_cmp_code(lhs, rhs)
if op == ">":
if not self.stupid_mode:
return "(%s == 1)" % self.inline_cmp_code(lhs, rhs)
if op == ">=":
if not self.stupid_mode:
return "(((%s)|1) == 1)" % self.inline_cmp_code(lhs, rhs)
if op == "in":
return rhs + ".__contains__(" + lhs + ")"
elif op == "not in":
return "!" + rhs + ".__contains__(" + lhs + ")"
if op == "is":
if self.number_classes:
return "@{{op_is}}(%s, %s)" % (lhs, rhs)
op = "==="
if op == "is not":
if self.number_classes:
return "!@{{op_is}}(%s, %s)" % (lhs, rhs)
op = "!=="
return "(" + lhs + " " + op + " " + rhs + ")"
def _not(self, node, current_klass):
expr = self.expr(node.expr, current_klass)
if self.stupid_mode:
return "(!(%s))" % expr
return "!" + self.inline_bool_code(expr)
def _or(self, node, current_klass):
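        # Short-circuit `or`: every operand except the last is assigned to a
        # fresh temporary and truth-tested, nesting conditionals so each
        # operand is evaluated at most once. Illustratively, `a or b` expands
        # to something shaped like (bool($or1=a) ? $or1 : b).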
if self.stupid_mode:
return " || ".join(map(bracket_fn, [self.expr(child, current_klass) for child in node.nodes]))
s = self.spacing()
expr = "@EXPR@"
for e in [self.expr(child, current_klass) for child in node.nodes[:-1]]:
v = self.uniqid('$or')
self.add_lookup('variable', v, v)
bool = self.inline_bool_code("%(v)s=%(e)s" % locals())
expr = expr.replace('@EXPR@', "(%(bool)s?%(v)s:@EXPR@)" % locals())
v = self.uniqid('$or')
self.add_lookup('variable', v, v)
return expr.replace('@EXPR@', self.expr(node.nodes[-1], current_klass))
expr = ",".join([self.expr(child, current_klass) for child in node.nodes])
return "@{{op_or}}([%s])" % expr
def _and(self, node, current_klass):
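        # Short-circuit `and`: the same temp-variable expansion as _or above,
        # with the branch arms swapped; illustratively `a and b` becomes
        # something shaped like (bool($and1=a) ? b : $and1).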
if self.stupid_mode:
return " && ".join(map(bracket_fn, [self.expr(child, current_klass) for child in node.nodes]))
s = self.spacing()
expr = "@EXPR@"
for e in [self.expr(child, current_klass) for child in node.nodes[:-1]]:
v = self.uniqid('$and')
self.add_lookup('variable', v, v)
bool = self.inline_bool_code("%(v)s=%(e)s" % locals())
expr = expr.replace('@EXPR@', "(%(bool)s?@EXPR@:%(v)s)" % locals())
v = self.uniqid('$and')
self.add_lookup('variable', v, v)
return expr.replace('@EXPR@', self.expr(node.nodes[-1], current_klass))
expr = ",".join([self.expr(child, current_klass) for child in node.nodes])
return "@{{op_and}}([%s])" % expr
def _for(self, node, current_klass):
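        # Translate a for loop. The iterable is prepared once; with inline
        # code enabled the emitted loop fast-paths objects exposing __array
        # (plain list/tuple storage) and falls back to the iterator protocol
        # otherwise, fetching each value into <iterid>_nextval. Tuple-unpack
        # targets expand into per-element assignments from that value's
        # __array, and for/else is supported via a <iterid>_test flag.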
save_is_generator = self.is_generator
if self.is_generator:
self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield
assign_name = ""
assign_tuple = []
iterid = self.uniqid('$iter')
iterator_name = "%s_iter" % iterid
self.add_lookup('variable', iterator_name, iterator_name)
nextval = "%s_nextval" % iterid
self.add_lookup('variable', nextval, nextval)
gentype = "%s_type" % iterid
self.add_lookup('variable', gentype, gentype)
array = "%s_array" % iterid
self.add_lookup('variable', array, array)
loopvar = "%s_idx" % iterid
self.add_lookup('variable', loopvar, loopvar)
if node.else_:
testvar = "%s_test" % iterid
self.add_lookup('variable', testvar, testvar)
assTestvar = "%s_test = " % iterid
else:
assTestvar = ""
reuse_tuple = "false"
if isinstance(node.assign, self.ast.AssName):
assign_name = self.add_lookup('variable', node.assign.name, node.assign.name)
if node.assign.flags == "OP_ASSIGN":
op = "="
elif isinstance(node.assign, self.ast.AssTuple):
reuse_tuple = "true"
op = "="
i = 0
for child in node.assign:
if not isinstance(child, self.ast.AssName):
raise TranslationError(
"deep unpacking not supported (in _for)", child, self.module_name)
child_name = child.name
self.add_lookup('variable', child_name, child_name)
child_name = self.add_lookup('variable', child_name, child_name)
if self.inline_code:
assign_tuple.append("""%(child_name)s %(op)s %(nextval)s.__array[%(i)i];""" % locals())
else:
assign_tuple.append("""%(child_name)s %(op)s %(nextval)s.$nextval.__array[%(i)i];""" % locals())
i += 1
else:
raise TranslationError(
"unsupported type (in _for)", node.assign, self.module_name)
if isinstance(node.list, self.ast.Name):
list_expr = self._name(node.list, current_klass)
elif isinstance(node.list, self.ast.Getattr):
list_expr = self.attrib_join(self._getattr(node.list, current_klass))
elif isinstance(node.list, self.ast.CallFunc):
list_expr = self._callfunc(node.list, current_klass)
elif isinstance(node.list, self.ast.Subscript):
list_expr = self._subscript(node.list, current_klass)
elif isinstance(node.list, self.ast.Const):
list_expr = self._const(node.list)
elif isinstance(node.list, self.ast.List):
list_expr = self._list(node.list, current_klass)
elif isinstance(node.list, self.ast.Slice):
list_expr = self._slice(node.list, current_klass)
elif isinstance(node.list, self.ast.ListComp):
list_expr = self._listcomp(node.list, current_klass)
elif isinstance(node.list, self.ast.Tuple):
list_expr = self._tuple(node.list, current_klass)
elif isinstance(node.list, self.ast.Add):
list_expr = self._add(node.list, current_klass)
else:
raise TranslationError(
"unsupported type (in _for)", node.list, self.module_name)
if not assign_tuple:
assign_name = self.add_lookup('variable', assign_name, assign_name)
if self.source_tracking:
self.stacksize_depth += 1
var_trackstack_size = "$pyjs__trackstack_size_%d" % self.stacksize_depth
self.add_lookup('variable', var_trackstack_size, var_trackstack_size)
self.w( self.spacing() + "%s=$pyjs.trackstack.length;" % var_trackstack_size)
s = self.spacing()
if self.inline_code:
self.w( """\
%(s)s%(iterator_name)s = """ % locals() + self.track_call("%(list_expr)s" % locals(), node.lineno) + ';')
self.w( """\
%(s)sif (typeof (%(array)s = %(iterator_name)s.__array) != 'undefined') {
%(s)s\t%(gentype)s = 0;
%(s)s} else {
%(s)s\t%(iterator_name)s = %(iterator_name)s.__iter__();
%(s)s\t%(gentype)s = typeof (%(array)s = %(iterator_name)s.__array) != 'undefined'? 0 : (typeof %(iterator_name)s.$genfunc == 'function'? 1 : -1);
%(s)s}
%(s)s%(loopvar)s = 0;""" % locals())
condition = "typeof (%(nextval)s=(%(gentype)s?(%(gentype)s > 0?%(iterator_name)s.next(true,%(reuse_tuple)s):@{{wrapped_next}}(%(iterator_name)s)):%(array)s[%(loopvar)s++])) != 'undefined'" % locals()
else:
self.w( """\
%(s)s%(iterator_name)s = """ % locals() + self.track_call("%(list_expr)s" % locals(), node.lineno) + ';')
self.w( """\
%(s)s%(nextval)s=@{{__iter_prepare}}(%(iterator_name)s,%(reuse_tuple)s);\
""" % locals())
condition = "typeof(@{{__wrapped_next}}(%(nextval)s).$nextval) != 'undefined'" % locals()
self.generator_switch_case(increment=True)
if self.is_generator:
self.w( self.spacing() + "$generator_state[%d] = 0;" % (len(self.generator_states), ))
self.generator_switch_case(increment=True)
self.w( self.indent() + "for (;%s($generator_state[%d] > 0 || %s);$generator_state[%d] = 0) {" % (assTestvar, len(self.generator_states), condition, len(self.generator_states), ))
else:
self.w( self.indent() + """while (%s%s) {""" % (assTestvar, condition))
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
if not assign_tuple:
if self.inline_code:
self.w( self.spacing() + """%(assign_name)s %(op)s %(nextval)s;""" % locals())
else:
self.w( self.spacing() + """%(assign_name)s %(op)s %(nextval)s.$nextval;""" % locals())
else:
for line in assign_tuple:
self.w( self.spacing() + line)
for n in node.body.nodes:
self._stmt(n, current_klass)
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_del_state()
self.w( self.dedent() + "}")
if node.else_:
self.generator_switch_case(increment=True)
self.w( self.indent() + "if (!%(testvar)s) {" % locals())
for n in node.else_.nodes:
self._stmt(n, current_klass)
self.w( self.dedent() + "}")
if self.source_tracking:
self.w( """\
%(s)sif ($pyjs.trackstack.length > $pyjs__trackstack_size_%(d)d) {
%(s)s\t$pyjs.trackstack = $pyjs.trackstack.slice(0,$pyjs__trackstack_size_%(d)d);
%(s)s\t$pyjs.track = $pyjs.trackstack.slice(-1)[0];
%(s)s}
%(s)s$pyjs.track.module='%(m)s';""" % {'s': self.spacing(), 'd': self.stacksize_depth, 'm': self.module_name})
self.stacksize_depth -= 1
self.generator_switch_case(increment=True)
self.is_generator = save_is_generator
def _while(self, node, current_klass):
save_is_generator = self.is_generator
if self.is_generator:
self.is_generator = self.compiler.walk(node, GeneratorExitVisitor(), walker=GeneratorExitVisitor()).has_yield
test = self.expr(node.test, current_klass)
if self.is_generator:
self.generator_switch_case(increment=True)
self.generator_reset_state()
self.generator_switch_case(increment=True)
self.w( self.indent() + "for (;($generator_state[%d] > 0)||(" % (\
(len(self.generator_states),)) + \
self.track_call(self.inline_bool_code(test), node.lineno) + ");$generator_state[%d] = 0) {" % (len(self.generator_states), ))
self.generator_add_state()
self.generator_switch_open()
self.generator_switch_case(increment=False)
else:
self.w( self.indent() + "while (" + self.track_call(self.inline_bool_code(test), node.lineno) + ") {")
if isinstance(node.body, self.ast.Stmt):
for child in node.body.nodes:
self._stmt(child, current_klass)
else:
raise TranslationError(
"unsupported type (in _while)", node.body, self.module_name)
if self.is_generator:
self.generator_switch_case(increment=True)
self.generator_switch_close()
self.generator_del_state()
self.w( self.dedent() + "}")
self.generator_switch_case(increment=True)
self.is_generator = save_is_generator
def _const(self, node):
if isinstance(node.value, int):
if not self.number_classes:
return str(node.value)
self.constant_int[node.value] = 1
return "$constant_int_%s" % str(node.value)
elif isinstance(node.value, long):
v = str(node.value)
if v[-1] == 'L':
v = v[:-1]
if not self.number_classes:
return v
self.constant_long[node.value] = 1
return "$constant_long_%s" % v
elif isinstance(node.value, float):
return str(node.value)
elif isinstance(node.value, basestring):
v = node.value
if isinstance(node.value, unicode):
v = v.encode('utf-8')
return "'%s'" % escapejs(v)
elif node.value is None:
return "null"
else:
raise TranslationError(
"unsupported type (in _const)", node, self.module_name)
def _unaryadd(self, node, current_klass):
if not self.operator_funcs:
return "(%s)" % self.expr(node.expr, current_klass)
e = self.expr(node.expr, current_klass)
v = self.uniqid('$uadd')
s = self.spacing()
return """(typeof (%(v)s=%(e)s)=='number'?
%(s)s\t%(v)s:
%(s)s\t@{{op_uadd}}(%(v)s))""" % locals()
def _unarysub(self, node, current_klass):
if not self.operator_funcs:
return "-(%s)" % self.expr(node.expr, current_klass)
e = self.expr(node.expr, current_klass)
v = self.uniqid('$usub')
s = self.spacing()
return """(typeof (%(v)s=%(e)s)=='number'?
%(s)s\t-%(v)s:
%(s)s\t@{{op_usub}}(%(v)s))""" % locals()
def _add(self, node, current_klass):
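        # With operator functions enabled, `+` is guarded at runtime; the
        # inline form is shaped roughly like (illustrative):
        #   (typeof ($a1=x) == typeof ($a2=y) && (number or string)
        #       ? $a1 + $a2 : op_add($a1, $a2))
        # so native JavaScript + is used when both operands are matching
        # primitives. _sub and friends below follow the same pattern.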
if not self.operator_funcs:
return "(%s)+(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$add')
v2 = self.uniqid('$add')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
if self.inline_code:
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && (typeof %(v1)s=='number'||typeof %(v1)s=='string')?
%(s)s\t%(v1)s+%(v2)s:
%(s)s\t@{{op_add}}(%(v1)s,%(v2)s))""" % locals()
return """@{{__op_add}}(%(v1)s=%(e1)s,%(v2)s=%(e2)s)""" % \
locals()
def _sub(self, node, current_klass):
if not self.operator_funcs:
return "(%s)-(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$sub')
v2 = self.uniqid('$sub')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
if self.inline_code:
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && (typeof %(v1)s=='number'||typeof %(v1)s=='string')?
%(s)s\t%(v1)s-%(v2)s:
%(s)s\t@{{op_sub}}(%(v1)s,%(v2)s))""" % locals()
return """@{{__op_sub}}(%(v1)s=%(e1)s,%(v2)s=%(e2)s)""" % \
locals()
def _floordiv(self, node, current_klass):
if not self.operator_funcs:
return "Math.floor(%s/%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$floordiv')
v2 = self.uniqid('$floordiv')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number' && %(v2)s !== 0?
%(s)s\tMath.floor(%(v1)s/%(v2)s):
%(s)s\t@{{op_floordiv}}(%(v1)s,%(v2)s))""" % locals()
def _div(self, node, current_klass):
if not self.operator_funcs:
return "(%s)/(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$div')
v2 = self.uniqid('$div')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
        op_div = 'op_truediv' if self.future_division else 'op_div'
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number' && %(v2)s !== 0?
%(s)s\t%(v1)s/%(v2)s:
%(s)s\t@{{%(op_div)s}}(%(v1)s,%(v2)s))""" % locals()
def _mul(self, node, current_klass):
if not self.operator_funcs:
return "(%s)*(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$mul')
v2 = self.uniqid('$mul')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'?
%(s)s\t%(v1)s*%(v2)s:
%(s)s\t@{{op_mul}}(%(v1)s,%(v2)s))""" % locals()
def _mod(self, node, current_klass):
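        # `%` does double duty: a string literal on the left becomes a
        # sprintf() call; numeric remainders are post-adjusted because
        # JavaScript % truncates toward zero while Python floors, e.g.
        # (-1) % 3 is 2 in Python but -1 in JavaScript, hence the
        # `(v < 0 && divisor > 0 ? v + divisor : v)` correction below.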
if isinstance(node.left, self.ast.Const) and isinstance(node.left.value, StringType):
return self.track_call("@{{sprintf}}("+self.expr(node.left, current_klass) + ", " + self.expr(node.right, current_klass)+")", node.lineno)
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
if self.stupid_mode:
return "(%(e1)s) %% (%(e2)s)" % locals()
v1 = self.uniqid('$mod')
v2 = self.uniqid('$mod')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
if not self.operator_funcs:
return """((%(v1)s=%(e1)s)!=null && (%(v2)s=%(e2)s)!=null && typeof %(v1)s=='string'?
%(s)s\t@{{sprintf}}(%(v1)s,%(v2)s):
%(s)s\t((%(v1)s=%(v1)s%%%(v2)s)<0&&%(v2)s>0?%(v1)s+%(v2)s:%(v1)s))""" % locals()
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'?
%(s)s\t((%(v1)s=%(v1)s%%%(v2)s)<0&&%(v2)s>0?%(v1)s+%(v2)s:%(v1)s):
%(s)s\t@{{op_mod}}(%(v1)s,%(v2)s))""" % locals()
def _power(self, node, current_klass):
if not self.operator_funcs:
return "Math.pow(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
e1 = self.expr(node.left, current_klass)
e2 = self.expr(node.right, current_klass)
v1 = self.uniqid('$pow')
v2 = self.uniqid('$pow')
self.add_lookup('variable', v1, v1)
self.add_lookup('variable', v2, v2)
s = self.spacing()
return """(typeof (%(v1)s=%(e1)s)==typeof (%(v2)s=%(e2)s) && typeof %(v1)s=='number'?
%(s)s\tMath.pow(%(v1)s,%(v2)s):
%(s)s\t@{{op_pow}}(%(v1)s,%(v2)s))""" % locals()
def _invert(self, node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "~(%s)" % self.expr(node.expr, current_klass)
return "@{{op_invert}}(%s)" % self.expr(node.expr, current_klass)
def _bitshiftleft(self, node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "(%s)<<(%s)"% (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
return "@{{op_bitshiftleft}}(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
def _bitshiftright(self, node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "(%s)>>(%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
return "@{{op_bitshiftright}}(%s,%s)" % (self.expr(node.left, current_klass), self.expr(node.right, current_klass))
def _bitand(self, node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "(%s)" % ")&(".join([self.expr(child, current_klass) for child in node.nodes])
if len(node.nodes) == 2:
return "@{{op_bitand2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass))
return "@{{op_bitand}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes])
def _bitxor(self,node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "(%s)" % ")^(".join([self.expr(child, current_klass) for child in node.nodes])
if len(node.nodes) == 2:
return "@{{op_bitxor2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass))
return "@{{op_bitxor}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes])
def _bitor(self, node, current_klass):
if not self.operator_funcs or not self.number_classes:
return "(%s)" % ")|(".join([self.expr(child, current_klass) for child in node.nodes])
if len(node.nodes) == 2:
return "@{{op_bitor2}}(%s, %s)" % (self.expr(node.nodes[0], current_klass), self.expr(node.nodes[1], current_klass))
return "@{{op_bitor}}([%s])" % ", ".join([self.expr(child, current_klass) for child in node.nodes])
def _subscript(self, node, current_klass):
if node.flags == "OP_APPLY":
if len(node.subs) == 1:
return self.inline_getitem_code(self.expr(node.expr, current_klass), self.expr(node.subs[0], current_klass))
else:
raise TranslationError(
"must have one sub (in _subscript)", node, self.module_name)
else:
raise TranslationError(
"unsupported flag (in _subscript)", node, self.module_name)
def _subscript_stmt(self, node, current_klass):
if node.flags == "OP_DELETE":
self.w( self.spacing() + self.track_call(self.expr(node.expr, current_klass) + ".__delitem__(" + self.expr(node.subs[0], current_klass) + ")", node.lineno) + ';')
else:
raise TranslationError(
"unsupported flag (in _subscript)", node, self.module_name)
def _assattr(self, node, current_klass):
attr_name = self.attrib_remap(node.attrname)
lhs = self._lhsFromAttr(node, current_klass)
if node.flags == "OP_DELETE":
self.w( self.spacing() + "@{{delattr}}(%s, '%s');" % (lhs, attr_name))
else:
            raise TranslationError(
                "unsupported flag (in _assattr)", node, self.module_name)
def _assname(self, node, current_klass):
name_type, pyname, jsname, depth, is_local = self.lookup(node.name)
if node.flags == "OP_DELETE":
self.w( self.spacing() + "delete %s;" % (jsname,))
else:
            raise TranslationError(
                "unsupported flag (in _assname)", node, self.module_name)
def _list(self, node, current_klass):
return self.track_call("$p['list']([" + ", ".join([self.expr(x, current_klass) for x in node.nodes]) + "])", node.lineno)
def _dict(self, node, current_klass):
items = []
for x in node.items:
key = self.expr(x[0], current_klass)
value = self.expr(x[1], current_klass)
items.append("[" + key + ", " + value + "]")
return self.track_call("$p['dict']([" + ", ".join(items) + "])")
def _tuple(self, node, current_klass):
return self.track_call("$p['tuple']([" + ", ".join([self.expr(x, current_klass) for x in node.nodes]) + "])", node.lineno)
def _lambda(self, node, current_klass):
save_local_prefix, self.local_prefix = self.local_prefix, None
save_is_class_definition, self.is_class_definition = self.is_class_definition, False
function_name = self.uniqid("$lambda")
self.w( self.spacing() + "var", False)
code_node = self.ast.Stmt([self.ast.Return(node.code, node.lineno)], node.lineno)
try: # python2.N
func_node = self.ast.Function(None, function_name, node.argnames, node.defaults, node.flags, None, code_node, node.lineno)
except: # lib2to3
func_node = self.ast.Function(None, function_name, node.argnames, node.defaults, node.varargs, node.kwargs, None, code_node, node.lineno)
self._function(func_node, current_klass, True)
self.local_prefix = save_local_prefix
self.is_class_definition = save_is_class_definition
return function_name
def _listcomp(self, node, current_klass):
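        # A list comprehension is rewritten at the AST level: an append()
        # call is wrapped in nested For nodes (innermost qualifier first)
        # and rendered inside an immediately-invoked function returning the
        # accumulator, so [x for x in xs] behaves like appending x to a
        # fresh list inside a loop and returning that list.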
self.push_lookup()
resultlist = self.uniqid("$listcomp")
self.add_lookup('variable', resultlist, resultlist)
save_output = self.output
self.output = StringIO()
tnode = self.ast.Discard(self.ast.CallFunc(self.ast.Getattr(self.ast.Name(resultlist), 'append'), [node.expr], None, None))
for qual in node.quals[::-1]:
if len(qual.ifs) > 1:
raise TranslationError(
"unsupported ifs (in _listcomp)", node, self.module_name)
tassign = qual.assign
tlist = qual.list
tbody = self.ast.Stmt([tnode])
if len(qual.ifs) == 1:
tbody = self.ast.Stmt([self.ast.If([(qual.ifs[0].test, tbody)], None, qual.ifs[0].lineno)])
telse_ = None
tnode = self.ast.For(tassign, tlist, tbody, telse_, node.lineno)
self._for(tnode, current_klass)
captured_output = self.output
self.output = save_output
listcomp_code = """\
function(){
\t%s
\t%s = $p['list']();
%s
\treturn %s;}()""" % (
self.local_js_vars_decl([]),
resultlist,
captured_output.getvalue(),
resultlist,
)
self.pop_lookup()
return listcomp_code
def _genexpr(self, node, current_klass):
save_has_yield = self.has_yield
self.has_yield = True
save_is_generator = self.is_generator
self.is_generator = True
save_generator_states = self.generator_states
self.generator_states = [0]
self.state_max_depth = len(self.generator_states)
self.push_options()
self.source_tracking = self.debug = False
if not isinstance(node.code, self.ast.GenExprInner):
raise TranslationError(
"unsupported code (in _genexpr)", node, self.module_name)
if node.argnames != ['.0']:
raise TranslationError(
"argnames not supported (in _genexpr)", node, self.module_name)
if node.kwargs:
raise TranslationError(
"kwargs not supported (in _genexpr)", node, self.module_name)
if node.varargs:
raise TranslationError(
"varargs not supported (in _genexpr)", node, self.module_name)
save_output = self.output
self.output = StringIO()
self.indent()
self.generator_switch_open()
self.generator_switch_case(increment=False)
tnode = self.ast.Yield(node.code.expr, node.lineno)
for qual in node.code.quals[::-1]:
if isinstance(qual, self.ast.GenExprFor):
if len(qual.ifs) > 1:
raise TranslationError(
"unsupported ifs (in _genexpr)", node.code, self.module_name)
tassign = qual.assign
titer = qual.iter
tbody = self.ast.Stmt([tnode])
tis_outmost = qual.is_outmost
if len(qual.ifs) == 1:
tbody = self.ast.Stmt([self.ast.If([(qual.ifs[0].test, tbody)], None, qual.ifs[0].lineno)])
telse_ = None
tnode = self.ast.For(tassign, titer, tbody, telse_, node.lineno)
self._for(tnode, current_klass)
else:
raise TranslationError(
"unsupported quals (in _genexpr)", node.code, self.module_name)
self.generator_switch_case(increment=True)
self.generator_switch_close()
captured_output = self.output.getvalue()
self.output = StringIO()
self.w( "function(){")
self.generator(captured_output)
self.w( self.dedent() + "}()")
captured_output = self.output.getvalue()
self.output = save_output
self.generator_states = save_generator_states
self.state_max_depth = len(self.generator_states)
self.is_generator = save_is_generator
self.has_yield = save_has_yield
self.pop_options()
return captured_output
def _slice(self, node, current_klass):
lower = "0"
upper = "null"
if node.lower != None:
lower = self.expr(node.lower, current_klass)
if node.upper != None:
upper = self.expr(node.upper, current_klass)
if node.flags == "OP_APPLY":
return "@{{slice}}(" + self.expr(node.expr, current_klass) + ", " + lower + ", " + upper + ")"
elif node.flags == "OP_DELETE":
return "@{{__delslice}}(" + self.expr(node.expr, current_klass) + ", " + lower + ", " + upper + ");"
else:
raise TranslationError(
"unsupported flag (in _slice)", node, self.module_name)
def _global(self, node, current_klass):
for name in node.names:
name_type, pyname, jsname, depth, is_local = self.lookup(name)
if name_type is None:
# Not defined yet.
name_type = 'variable'
pyname = name
jsname = self.scopeName(name, depth, is_local)
else:
name_type = 'global'
self.add_lookup(name_type, pyname, jsname)
def _if_expr(self, node, current_klass):
test = self.expr(node.test, current_klass)
then = self.expr(node.then, current_klass)
else_ = self.expr(node.else_, current_klass)
return "(" + self.inline_bool_code(test) + "? (%(then)s) : (%(else_)s))" % locals()
def _backquote(self, node, current_klass):
return "@{{repr}}(%s)" % self.expr(node.expr, current_klass)
def expr(self, node, current_klass):
if isinstance(node, self.ast.Const):
return self._const(node)
# @@@ not sure if the parentheses should be here or in individual operator functions - JKT
elif isinstance(node, self.ast.Mul):
return self._mul(node, current_klass)
elif isinstance(node, self.ast.Add):
return self._add(node, current_klass)
elif isinstance(node, self.ast.Sub):
return self._sub(node, current_klass)
elif isinstance(node, self.ast.Div):
return self._div(node, current_klass)
elif isinstance(node, self.ast.FloorDiv):
return self._floordiv(node, current_klass)
elif isinstance(node, self.ast.Mod):
return self._mod(node, current_klass)
elif isinstance(node, self.ast.Power):
return self._power(node, current_klass)
elif isinstance(node, self.ast.UnaryAdd):
return self._unaryadd(node, current_klass)
elif isinstance(node, self.ast.UnarySub):
return self._unarysub(node, current_klass)
elif isinstance(node, self.ast.Not):
return self._not(node, current_klass)
elif isinstance(node, self.ast.Or):
return self._or(node, current_klass)
elif isinstance(node, self.ast.And):
return self._and(node, current_klass)
elif isinstance(node, self.ast.Invert):
return self._invert(node, current_klass)
elif isinstance(node,self.ast.LeftShift):
return self._bitshiftleft(node, current_klass)
elif isinstance(node, self.ast.RightShift):
return self._bitshiftright(node, current_klass)
elif isinstance(node, self.ast.Bitand):
return self._bitand(node, current_klass)
elif isinstance(node, self.ast.Bitxor):
return self._bitxor(node, current_klass)
elif isinstance(node, self.ast.Bitor):
return self._bitor(node, current_klass)
elif isinstance(node, self.ast.Compare):
return self._compare(node, current_klass)
elif isinstance(node, self.ast.CallFunc):
return self._callfunc(node, current_klass, optlocal_var=True)
elif isinstance(node, self.ast.Name):
return self._name(node, current_klass, optlocal_var=True)
elif isinstance(node, self.ast.Subscript):
return self._subscript(node, current_klass)
elif isinstance(node, self.ast.Getattr):
attr_ = self._getattr(node, current_klass)
if len(attr_) == 1:
return attr_[0]
attr = self.attrib_join(attr_)
attr_left = self.attrib_join(attr_[:-1])
attr_right = attr_[-1]
attrstr = attr
v = self.uniqid('$attr')
vl = self.uniqid('$attr')
self.add_lookup('variable', v, v)
self.add_lookup('variable', vl, vl)
if self.bound_methods or self.descriptors:
getattr_condition = """(%(v)s=(%(vl)s=%(attr_left)s)['%(attr_right)s']) == null || ((%(vl)s.__is_instance__) && typeof %(v)s == 'function')"""
if self.descriptors:
getattr_condition += """ || (typeof %(v)s['__get__'] == 'function')"""
attr_code = """\
(""" + getattr_condition + """?
\t@{{getattr}}(%(vl)s, '%(attr_right)s'):
\t%(attr)s)\
"""
attr_code = ('\n'+self.spacing()+"\t\t").join(attr_code.split('\n'))
else:
attr_code = "%(attr)s"
attr_code = attr_code % locals()
s = self.spacing()
orig_attr = attr
if not self.attribute_checking:
attr = attr_code
else:
if attr.find('(') < 0 and not self.debug:
attrstr = attr.replace("\n", "\n\\")
attr = """(typeof %(attr)s=='undefined'?
%(s)s\t\t(function(){throw TypeError("%(attrstr)s is undefined");})():
%(s)s\t\t%(attr_code)s)""" % locals()
else:
attr_ = attr
if self.source_tracking or self.debug:
_source_tracking = self.source_tracking
_debug = self.debug
_attribute_checking = self.attribute_checking
self.attribute_checking = self.source_tracking = self.debug = False
attr_ = self.attrib_join(self._getattr(node, current_klass))
self.source_tracking = _source_tracking
self.debug = _debug
self.attribute_checking = _attribute_checking
attrstr = attr_.replace("\n", "\\\n")
attr = """(function(){
%(s)s\tvar $pyjs__testval=%(attr_code)s;
%(s)s\treturn (typeof $pyjs__testval=='undefined'?
%(s)s\t\t(function(){throw TypeError(\"%(attrstr)s is undefined");})():
%(s)s\t\t$pyjs__testval);
%(s)s})()""" % locals()
if True: # not self.attribute_checking or self.inline_code:
return attr
bound_methods = self.bound_methods and "true" or "false"
descriptors = self.descriptors and "true" or "false"
attribute_checking = self.attribute_checking and "true" or "false"
source_tracking = self.source_tracking and "true" or "false"
attr = """\
@{{__getattr_check}}(%(attr)s, %(attr_left)s, %(attr_right)s,\
"%(attrstr)s", %(bound_methods)s, %(descriptors)s, %(attribute_checking)s,\
%(source_tracking)s)
""" % locals()
return attr
elif isinstance(node, self.ast.List):
return self._list(node, current_klass)
elif isinstance(node, self.ast.Dict):
return self._dict(node, current_klass)
elif isinstance(node, self.ast.Tuple):
return self._tuple(node, current_klass)
elif isinstance(node, self.ast.Slice):
return self._slice(node, current_klass)
elif isinstance(node, self.ast.Lambda):
return self._lambda(node, current_klass)
elif isinstance(node, self.ast.ListComp):
return self._listcomp(node, current_klass)
elif isinstance(node, self.ast.IfExp):
return self._if_expr(node, current_klass)
elif isinstance(node, self.ast.Yield):
return self._yield_expr(node, current_klass)
elif isinstance(node, self.ast.Backquote):
return self._backquote(node, current_klass)
elif isinstance(node, self.ast.GenExpr):
return self._genexpr(node, current_klass)
else:
raise TranslationError(
"unsupported type (in expr)", node, self.module_name)
def import_compiler(internal_ast):
if internal_ast:
from lib2to3 import compiler
else:
import compiler
return compiler
def translate(compiler, sources, output_file, module_name=None, **kw):
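    # Parses each source, folds later sources into the first via merge()
    # (honouring any `#@PYJS_...` override flags found in the files), then
    # either reports the imports or writes the translated JavaScript.
    # Illustrative usage (names assumed, not a documented contract):
    #   compiler = import_compiler(False)
    #   imports, js_imports = translate(compiler, ['foo.py'], 'foo.js')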
kw = dict(all_compile_options, **kw)
list_imports = kw.get('list_imports', False)
sources = map(os.path.abspath, sources)
output_file = os.path.abspath(output_file)
if not module_name:
module_name, extension = os.path.splitext(os.path.basename(sources[0]))
trees = []
    tree = None
for src in sources:
current_tree = compiler.parseFile(src)
flags = set()
f = file(src)
for l in f:
if l.startswith('#@PYJS_'):
flags.add(l.strip()[7:])
f.close()
if tree:
tree = merge(compiler.ast, module_name, tree, current_tree, flags)
else:
tree = current_tree
    # XXX: if we have an override, the source file and the tree are not the same!
f = file(sources[0], "r")
src = f.read()
f.close()
if list_imports:
v = ImportVisitor(module_name)
compiler.walk(tree, v)
return v.imported_modules, v.imported_js
if output_file == '-':
output = sys.stdout
else:
output = file(output_file, 'w')
t = Translator(compiler,
module_name, sources[0], src, tree, output, **kw)
output.close()
return t.imported_modules, t.imported_js
def merge(ast, module_name, tree1, tree2, flags):
if 'FULL_OVERRIDE' in flags:
return tree2
for child in tree2.node:
if isinstance(child, ast.Function):
replaceFunction(ast, module_name, tree1, child.name, child)
elif isinstance(child, ast.Class):
replaceClassMethods(ast, module_name, tree1, child.name, child)
else:
raise TranslationError(
"Do not know how to merge %s" % child, child, module_name)
return tree1
def replaceFunction(ast, module_name, tree, function_name, function_node):
# find function to replace
for child in tree.node:
if isinstance(child, ast.Function) and child.name == function_name:
copyFunction(child, function_node)
return
raise TranslationError(
"function not found: " + function_name, function_node, module_name)
def copyFunction(target, source):
target.code = source.code
target.argnames = source.argnames
target.defaults = source.defaults
target.doc = source.doc # @@@ not sure we need to do this any more
def addCode(target, source):
target.nodes.append(source)
def replaceClassMethods(ast, module_name, tree, class_name, class_node):
# find class to replace
old_class_node = None
for child in tree.node:
if isinstance(child, ast.Class) and child.name == class_name:
old_class_node = child
break
if not old_class_node:
raise TranslationError(
"class not found: " + class_name, class_node, module_name)
# replace methods
for node in class_node.code:
if isinstance(node, ast.Function):
found = False
for child in old_class_node.code:
if isinstance(child, ast.Function) and child.name == node.name:
found = True
copyFunction(child, node)
break
if not found:
raise TranslationError(
"class method not found: " + class_name + "." + node.name,
node, module_name)
elif isinstance(node, ast.Assign) and \
isinstance(node.nodes[0], ast.AssName):
found = False
for child in old_class_node.code:
if isinstance(child, ast.Assign) and \
eqNodes(child.nodes, node.nodes):
found = True
copyAssign(child, node)
if not found:
addCode(old_class_node.code, node)
elif isinstance(node, ast.Pass):
pass
else:
            raise TranslationError(
                "Do not know how to merge %s" % node, node, module_name)
class PlatformParser:
def __init__(self, compiler,
platform_dir = "", verbose=True, chain_plat=None):
self.platform_dir = platform_dir
self.parse_cache = {}
self.platform = ""
self.verbose = verbose
self.chain_plat = chain_plat
self.compiler = compiler
def setPlatform(self, platform):
self.platform = platform
def parseModule(self, module_name, file_name):
importing = False
if not self.parse_cache.has_key(file_name):
importing = True
if self.chain_plat:
mod, override = self.chain_plat.parseModule(module_name,
file_name)
else:
mod = self.compiler.parseFile(file_name)
self.parse_cache[file_name] = mod
else:
mod = self.parse_cache[file_name]
override = False
platform_file_name = self.generatePlatformFilename(file_name)
if self.platform and os.path.isfile(platform_file_name):
mod = copy.deepcopy(mod)
mod_override = self.compiler.parseFile(platform_file_name)
if self.verbose:
print "Merging", module_name, self.platform
            self.merge(mod, mod_override)
override = True
if self.verbose:
if override:
print "Importing %s (Platform %s)" % (module_name, self.platform)
elif importing:
print "Importing %s" % (module_name)
return mod, override
def generatePlatformFilename(self, file_name):
(module_name, extension) = os.path.splitext(os.path.basename(file_name))
platform_file_name = module_name + self.platform + extension
return os.path.join(os.path.dirname(file_name), self.platform_dir, platform_file_name)
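    # Example (hypothetical inputs): with platform_dir="platform" and
    # platform="IE6", a module at "gui/Widget.py" is looked up at
    # "gui/platform/WidgetIE6.py":
    #
    #   >>> p = PlatformParser(compiler, platform_dir="platform")
    #   >>> p.setPlatform("IE6")
    #   >>> p.generatePlatformFilename("gui/Widget.py")
    #   'gui/platform/WidgetIE6.py'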
def replaceFunction(self, tree, function_name, function_node):
# find function to replace
for child in tree.node:
if isinstance(child, self.ast.Function) and child.name == function_name:
self.copyFunction(child, function_node)
return
raise TranslationError(
"function not found: " + function_name,
function_node, self.module_name)
def replaceClassMethods(self, tree, class_name, class_node):
# find class to replace
old_class_node = None
for child in tree.node:
if isinstance(child, self.ast.Class) and child.name == class_name:
old_class_node = child
break
if not old_class_node:
raise TranslationError(
"class not found: " + class_name, class_node, self.module_name)
# replace methods
for node in class_node.code:
if isinstance(node, self.ast.Function):
found = False
for child in old_class_node.code:
if isinstance(child, self.ast.Function) and child.name == node.name:
found = True
self.copyFunction(child, node)
break
if not found:
raise TranslationError(
"class method not found: " + class_name + "." + node.name,
node, self.module_name)
elif isinstance(node, self.ast.Assign) and \
isinstance(node.nodes[0], self.ast.AssName):
found = False
for child in old_class_node.code:
if isinstance(child, self.ast.Assign) and \
self.eqNodes(child.nodes, node.nodes):
found = True
self.copyAssign(child, node)
if not found:
self.addCode(old_class_node.code, node)
elif isinstance(node, self.ast.Pass):
pass
else:
raise TranslationError(
"Do not know how to merge %s" % node,
node, self.module_name)
def copyFunction(self, target, source):
target.code = source.code
target.argnames = source.argnames
target.defaults = source.defaults
target.doc = source.doc # @@@ not sure we need to do this any more
def copyAssign(self, target, source):
target.nodes = source.nodes
target.expr = source.expr
target.lineno = source.lineno
return
def eqNodes(self, nodes1, nodes2):
return str(nodes1) == str(nodes2)
def dotreplace(fname):
path, ext = os.path.splitext(fname)
return path.replace(".", "/") + ext
class ImportVisitor(object):
def __init__(self, module_name):
self.module_name = module_name
self.imported_modules = []
self.imported_js = []
def add_imported_module(self, importName):
if not importName in self.imported_modules:
self.imported_modules.append(importName)
def visitModule(self, node):
self.visit(node.node)
def visitImport(self, node):
self._doImport(node.names)
def _doImport(self, names):
for importName, importAs in names:
if importName == '__pyjamas__':
continue
if importName.endswith(".js"):
continue
imp.add_imported_js(importName)
continue
self.add_imported_module(importName)
def visitFrom(self, node):
if node.modname == '__pyjamas__':
return
if node.modname == '__javascript__':
return
# XXX: hack for in-function checking, we should have another
# object to check our scope
absPath = False
modname = node.modname
if hasattr(node, 'level') and node.level > 0:
absPath = True
modname = self.module_name.split('.')
level = node.level
if len(modname) < level:
raise TranslationError(
"Attempted relative import beyond toplevel package",
node, self.module_name)
if node.modname != '':
level += 1
if level > 1:
modname = '.'.join(modname[:-(node.level-1)])
else:
modname = self.module_name
if node.modname != '':
modname += '.' + node.modname
if modname[0] == '.':
modname = modname[1:]
for name in node.names:
sub = modname + '.' + name[0]
ass_name = name[1] or name[0]
self._doImport(((sub, ass_name),))
class AppTranslator:
def __init__(self, compiler,
library_dirs=[], parser=None, dynamic=False,
verbose=True,
debug=False,
print_statements=True,
function_argument_checking=True,
attribute_checking=True,
bound_methods=True,
descriptors=True,
source_tracking=True,
line_tracking=True,
store_source=True,
inline_code=False,
operator_funcs=True,
number_classes=True,
):
self.compiler = compiler
self.extension = ".py"
self.print_statements = print_statements
self.library_modules = []
self.overrides = {}
self.library_dirs = path + library_dirs
self.dynamic = dynamic
self.verbose = verbose
self.debug = debug
self.print_statements = print_statements
self.function_argument_checking = function_argument_checking
self.attribute_checking = attribute_checking
self.bound_methods = bound_methods
self.descriptors = descriptors
self.source_tracking = source_tracking
self.line_tracking = line_tracking
self.store_source = store_source
self.inline_code = inline_code
self.operator_funcs = operator_funcs
self.number_classes = number_classes
if not parser:
self.parser = PlatformParser(self.compiler)
else:
self.parser = parser
self.parser.dynamic = dynamic
def findFile(self, file_name):
if os.path.isfile(file_name):
return file_name
for library_dir in self.library_dirs:
file_name = dotreplace(file_name)
full_file_name = os.path.join(
LIBRARY_PATH, library_dir, file_name)
if os.path.isfile(full_file_name):
return full_file_name
fnameinit, ext = os.path.splitext(file_name)
fnameinit = fnameinit + "/__init__.py"
full_file_name = os.path.join(
LIBRARY_PATH, library_dir, fnameinit)
if os.path.isfile(full_file_name):
return full_file_name
raise Exception("file not found: " + file_name)
    def _translate(self, module_name, is_app=False, debug=False, imported_js=None):
self.library_modules.append(module_name)
file_name = self.findFile(module_name + self.extension)
output = StringIO()
f = file(file_name, "r")
src = f.read()
f.close()
mod, override = self.parser.parseModule(module_name, file_name)
if override:
override_name = "%s.%s" % (self.parser.platform.lower(),
module_name)
self.overrides[override_name] = override_name
t = Translator(self.compiler,
module_name, file_name, src, mod, output,
self.dynamic, self.findFile,
debug = self.debug,
print_statements = self.print_statements,
function_argument_checking = self.function_argument_checking,
attribute_checking = self.attribute_checking,
bound_methods = self.bound_methods,
descriptors = self.descriptors,
source_tracking = self.source_tracking,
line_tracking = self.line_tracking,
store_source = self.store_source,
inline_code = self.inline_code,
operator_funcs = self.operator_funcs,
number_classes = self.number_classes,
)
module_str = output.getvalue()
imported_modules_str = ""
for module in t.imported_modules:
if module not in self.library_modules:
self.library_modules.append(module)
return imported_modules_str + module_str
def translate(self, module_name, is_app=True, debug=False,
library_modules=[]):
app_code = StringIO()
lib_code = StringIO()
imported_js = []
self.library_modules = []
self.overrides = {}
for library in library_modules:
if library.endswith(".js"):
imported_js.append(library)
continue
self.library_modules.append(library)
if self.verbose:
print 'Including LIB', library
print >> lib_code, '\n//\n// BEGIN LIB '+library+'\n//\n'
print >> lib_code, self._translate(
library, False, debug=debug, imported_js=imported_js)
print >> lib_code, "/* initialize static library */"
print >> lib_code, "%s();\n" % library
print >> lib_code, '\n//\n// END LIB '+library+'\n//\n'
if module_name:
print >> app_code, self._translate(
module_name, is_app, debug=debug, imported_js=imported_js)
for js in imported_js:
path = self.findFile(js)
if os.path.isfile(path):
if self.verbose:
print 'Including JS', js
print >> lib_code, '\n//\n// BEGIN JS '+js+'\n//\n'
print >> lib_code, file(path).read()
print >> lib_code, '\n//\n// END JS '+js+'\n//\n'
else:
print >>sys.stderr, 'Warning: Unable to find imported javascript:', js
return lib_code.getvalue(), app_code.getvalue()
|
minghuascode/pyj
|
pyjs/src/pyjs/translator_proto-KEES.py
|
Python
|
apache-2.0
| 193,343
|
[
"VisIt"
] |
31ecdee43250de2b39027de48a32a54e0acf4b020773ff302fe09a84d9f5e6eb
|
# -*- coding: utf-8 -*-
import execjs
from .xn_data import XNCoords
from .xn_parser import XNParserBase, safe_int, get_attribute
from . import xn_logger
logger = xn_logger.get(__name__, debug=False)
class GalaxyParser(XNParserBase):
def __init__(self):
super(GalaxyParser, self).__init__()
self._in_galaxy = False
self.script_body = ''
self.galaxy_rows = []
def clear(self):
self.script_body = ''
self.galaxy_rows = []
def handle_starttag(self, tag: str, attrs: list):
super(GalaxyParser, self).handle_starttag(tag, attrs)
if tag == 'div':
# find [<div id='galaxy'>]
div_id = get_attribute(attrs, 'id')
if div_id is not None:
if div_id == 'galaxy':
self._in_galaxy = True
def handle_endtag(self, tag: str):
super(GalaxyParser, self).handle_endtag(tag)
if self._in_galaxy and tag == 'script':
self._in_galaxy = False
# automatically parse js script
if self.script_body != '':
self.unscramble_galaxy_script()
def handle_data2(self, data: str, tag: str, attrs: list):
if self._in_galaxy and (tag == 'script'):
self.script_body = data
# logger.debug('Got galaxy script: [{0}]'.format(self.script_body))
        return  # end handle_data2()
def unscramble_galaxy_script(self):
if not self.script_body.startswith('var Deuterium = '):
logger.error('Invalid format of script body: cannot parse it!')
return None
eval_start = self.script_body.find('eval(function(p,a,c,k,e,d)')
if eval_start == -1:
logger.error('parse error (1)')
return None
eval_end = self.script_body.find("$('#galaxy').append(PrintRow());")
if eval_end == -1:
logger.error('parse error (2)')
return None
eval_text = self.script_body[eval_start:eval_end]
eval_text = eval_text.strip()
logger.debug('eval [{0}]'.format(eval_text))
# ^^ [eval(function(p,a,c,k,e,d){e=function(c){r... ...141|7866|u0426'.split('|')))]
inner_eval = eval_text[5:-1]
logger.debug('inner eval [{0}]'.format(inner_eval))
# ^^ [function(p,a,c,k,e,d){e=functi... ...0426'.split('|'))]
#
        # create a JS interpreter and eval that
js_runtimes = execjs.available_runtimes()
if 'Node' in js_runtimes:
js = execjs.get('Node')
else:
js = execjs.get() # default
logger.debug('Using [{0}] as JS runtime.'.format(js.name))
eval_res = js.eval(inner_eval)
# Now, eval_res is a string:
# row[12]={"planet":12,"id_planet":54448,"ally_planet":0,"metal":0,"crystal":0,
# "name":"\u0413\u043b\u0430\u0432\u043d\u0430\u044f \u043f\u043b\u0430\u043d\u0435\u0442\u0430",
# "planet_type":1,"destruyed":0,"image":"normaltempplanet02","last_active":60,"parent_planet":0,
# "luna_id":null,"luna_name":null,"luna_destruyed":null,"luna_diameter":null,"luna_temp":null,
# "user_id":71992,"username":"\u041e\u041b\u0415\u0413 \u041a\u0410\u0420\u041f\u0415\u041d\u041a\u041e",
# "race":4,"ally_id":0,"authlevel":0,"onlinetime":1,"urlaubs_modus_time":0,"banaday":0,"sex":1,
# "avatar":7,"user_image":"","ally_name":null,"ally_members":null,"ally_web":null,"ally_tag":null,
# "type":null,"total_rank":7865,"total_points":0};row[9]={"planet":9,"id_planet":54450,"ally_planet":0,
# "metal":0,"crystal":0,"name":"Arnon","planet_type":1,"destruyed":0,"image":"normaltempplanet08",
# "last_active":0,"parent_planet":0,"luna_id":null,"luna_name":null,"luna_destruyed":null,
# "luna_diameter":null,"luna_temp":null,"user_id":71995,"username":"minlexx","race":4,"ally_id":389,
# "authlevel":0,"onlinetime":0,"urlaubs_modus_time":0,"banaday":0,"sex":1,"avatar":5,
# "user_image":"71995_1440872455.jpg","ally_name":"Fury","ally_members":8,"ally_web":"",
# "ally_tag":"Fury","type":null,"total_rank":141,"total_points":115582};
# ...
        # we need to eval() this string again, slightly modified, to get the resulting row:
eval_res = 'var row = []; ' + eval_res + "\nreturn row;"
ctx = js.compile(eval_res)
self.galaxy_rows = ctx.exec_(eval_res)
# print(type(self.galaxy_rows))
# print(self.galaxy_rows)
# <class 'list'>
# [None, None, None, None, None, None, None,
# {
# 'type': None,
# 'planet_type': 1,
# 'total_points': 0,
# 'ally_planet': 0,
# 'ally_web': None,
# 'urlaubs_modus_time': 0,
# 'crystal': 0,
# 'user_id': 71993,
# 'name': 'Главная планета',
# 'ally_tag': None,
# 'last_active': 60,
# 'luna_name': None,
# 'planet': 7,
# 'luna_diameter': None,
# 'ally_id': 0,
# 'onlinetime': 1,
# 'luna_id': None,
# 'parent_planet': 0,
# 'sex': 1,
# 'ally_name': None,
# 'avatar': 8,
# 'user_image': '',
# 'destruyed': 0,
# 'banaday': 0,
# 'luna_temp': None,
# 'race': 4,
# 'image': 'normaltempplanet09',
# 'username': 'Дмитрий и Марина Цыкуновы',
# 'luna_destruyed': None,
# 'metal': 0,
# 'id_planet': 54449,
# 'authlevel': 0,
# 'ally_members': None,
# 'total_rank': 7866
# },
# None, ... ]
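        # Minimal sketch of the execjs round trip used here (assumes a JS
        # runtime such as Node.js is installed):
        #
        #   >>> import execjs
        #   >>> execjs.get().eval('[1, 2, 3].length')
        #   3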
|
minlexx/xnovacmd
|
ui/xnova/xn_parser_galaxy.py
|
Python
|
gpl-2.0
| 5,683
|
[
"CRYSTAL",
"Galaxy"
] |
916db7f3fe1ea4a9bbe7e04a9afc13430943146c92a7d0b75e3ee44ccde6301e
|
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Util.py 4369 2009/09/19 15:58:29 scons"
import copy
import os
import os.path
import re
import string
import sys
import types
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
DictType = types.DictType
InstanceType = types.InstanceType
ListType = types.ListType
StringType = types.StringType
TupleType = types.TupleType
def dictify(keys, values, result={}):
for k, v in zip(keys, values):
result[k] = v
return result
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
# My ActivePython 2.0.1 doesn't set os.altsep! What gives?
_altsep = '/'
if _altsep:
def rightmost_separator(path, sep, _altsep=_altsep):
rfind = string.rfind
return max(rfind(path, sep), rfind(path, _altsep))
else:
rightmost_separator = string.rfind
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
"""Check whether sequence str contains ANY of the items in set."""
for c in set:
if c in str: return 1
return 0
def containsAll(str, set):
"""Check whether sequence str contains ALL of the items in set."""
for c in set:
if c not in str: return 0
return 1
def containsOnly(str, set):
"""Check whether sequence str contains ONLY items in set."""
for c in str:
if c not in set: return 0
return 1
def splitext(path):
"Same as os.path.splitext() but faster."
sep = rightmost_separator(path, os.sep)
dot = string.rfind(path, '.')
# An ext is only real if it has at least one non-digit char
if dot > sep and not containsOnly(path[dot:], "0123456789."):
return path[:dot],path[dot:]
else:
return path,""
def updrive(path):
"""
Make the drive letter (if any) upper case.
    This is useful because Windows is inconsistent on the case
of the drive letter, which can cause inconsistencies when
calculating command signatures.
"""
drive, rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
return path
class NodeList(UserList):
"""This class is almost exactly like a regular list of Nodes
(actually it can hold any object), with one important difference.
If you try to get an attribute from this list, it will return that
attribute from every item in the list. For example:
>>> someList = NodeList([ ' foo ', ' bar ' ])
>>> someList.strip()
[ 'foo', 'bar' ]
"""
def __nonzero__(self):
return len(self.data) != 0
def __str__(self):
return string.join(map(str, self.data))
def __iter__(self):
return iter(self.data)
def __call__(self, *args, **kwargs):
result = map(lambda x, args=args, kwargs=kwargs: apply(x,
args,
kwargs),
self.data)
return self.__class__(result)
def __getattr__(self, name):
result = map(lambda x, n=name: getattr(x, n), self.data)
return self.__class__(result)
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')
def get_environment_var(varstr):
"""Given a string, first determine if it looks like a reference
to a single environment variable, like "$FOO" or "${FOO}".
If so, return that variable with no decorations ("FOO").
If not, return None."""
mo=_get_env_var.match(to_String(varstr))
if mo:
var = mo.group(1)
if var[0] == '{':
return var[1:-1]
else:
return var
else:
return None
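# Example: only a lone "$FOO" / "${FOO}" reference qualifies; anything
# else returns None:
#
#   >>> get_environment_var("$FOO")
#   'FOO'
#   >>> get_environment_var("${FOO}")
#   'FOO'
#   >>> get_environment_var("$FOO/bar")  # returns None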
class DisplayEngine:
def __init__(self):
self.__call__ = self.print_it
def print_it(self, text, append_newline=1):
if append_newline: text = text + '\n'
try:
sys.stdout.write(text)
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
# IOError's here so that SCons can continue and shutdown
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def dont_print(self, text, append_newline=1):
pass
def set_mode(self, mode):
if mode:
self.__call__ = self.print_it
else:
self.__call__ = self.dont_print
def render_tree(root, child_func, prune=0, margin=[0], visited={}):
"""
Render a tree of nodes into an ASCII tree view.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if visited.has_key(rname):
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i<len(children)-1)
retval = retval + render_tree(children[i], child_func, prune, margin, visited
)
margin.pop()
return retval
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
"""
Print a tree of nodes. This is like render_tree, except it prints
lines directly instead of creating a string representation in memory,
so that huge trees can be printed.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
showtags - print status information to the left of each node line
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
if showtags:
if showtags == 2:
print ' E = exists'
print ' R = exists in repository only'
print ' b = implicit builder'
print ' B = explicit builder'
print ' S = side effect'
print ' P = precious'
print ' A = always build'
print ' C = current'
print ' N = no clean'
print ' H = no cache'
print ''
tags = ['[']
tags.append(' E'[IDX(root.exists())])
tags.append(' R'[IDX(root.rexists() and not root.exists())])
tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
[0,2][IDX(root.has_builder())]])
tags.append(' S'[IDX(root.side_effect)])
tags.append(' P'[IDX(root.precious)])
tags.append(' A'[IDX(root.always_build)])
tags.append(' C'[IDX(root.is_up_to_date())])
tags.append(' N'[IDX(root.noclean)])
tags.append(' H'[IDX(root.nocache)])
tags.append(']')
else:
tags = []
def MMM(m):
return [" ","| "][m]
margins = map(MMM, margin[:-1])
children = child_func(root)
if prune and visited.has_key(rname) and children:
print string.join(tags + margins + ['+-[', rname, ']'], '')
return
print string.join(tags + margins + ['+-', rname], '')
visited[rname] = 1
if children:
margin.append(1)
idx = IDX(showtags)
for C in children[:-1]:
print_tree(C, child_func, prune, idx, margin, visited)
margin[-1] = 0
print_tree(children[-1], child_func, prune, idx, margin, visited)
margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
try:
class mystr(str):
pass
except TypeError:
# An older Python version without new-style classes.
#
# The actual implementations here have been selected after timings
    # coded up in bench/is_types.py (from the SCons source tree,
# see the scons-src distribution), mostly against Python 1.5.2.
# Key results from those timings:
#
# -- Storing the type of the object in a variable (t = type(obj))
# slows down the case where it's a native type and the first
# comparison will match, but nicely speeds up the case where
# it's a different native type. Since that's going to be
# common, it's a good tradeoff.
#
# -- The data show that calling isinstance() on an object that's
# a native type (dict, list or string) is expensive enough
# that checking up front for whether the object is of type
# InstanceType is a pretty big win, even though it does slow
# down the case where it really *is* an object instance a
# little bit.
def is_Dict(obj):
t = type(obj)
return t is DictType or \
(t is InstanceType and isinstance(obj, UserDict))
def is_List(obj):
t = type(obj)
return t is ListType \
or (t is InstanceType and isinstance(obj, UserList))
def is_Sequence(obj):
t = type(obj)
return t is ListType \
or t is TupleType \
or (t is InstanceType and isinstance(obj, UserList))
def is_Tuple(obj):
t = type(obj)
return t is TupleType
if hasattr(types, 'UnicodeType'):
def is_String(obj):
t = type(obj)
return t is StringType \
or t is UnicodeType \
or (t is InstanceType and isinstance(obj, UserString))
else:
def is_String(obj):
t = type(obj)
return t is StringType \
or (t is InstanceType and isinstance(obj, UserString))
def is_Scalar(obj):
return is_String(obj) or not is_Sequence(obj)
def flatten(obj, result=None):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if is_Scalar(obj):
return [obj]
if result is None:
result = []
for item in obj:
if is_Scalar(item):
result.append(item)
else:
flatten_sequence(item, result)
return result
def flatten_sequence(sequence, result=None):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
if result is None:
result = []
for item in sequence:
if is_Scalar(item):
result.append(item)
else:
flatten_sequence(item, result)
return result
#
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
if hasattr(types, 'UnicodeType'):
UnicodeType = types.UnicodeType
def to_String(s):
if isinstance(s, UserString):
t = type(s.data)
else:
t = type(s)
if t is UnicodeType:
return unicode(s)
else:
return str(s)
else:
to_String = str
def to_String_for_signature(obj):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
def to_String_for_subst(s):
if is_Sequence( s ):
return string.join( map(to_String_for_subst, s) )
return to_String( s )
else:
# A modern Python version with new-style classes, so we can just use
# isinstance().
#
# We are using the following trick to speed-up these
# functions. Default arguments are used to take a snapshot of the
    # global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Empirically, Python versions with new-style classes all have
# unicode.
#
# Note that profiling data shows a speed-up when comparing
    # explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
StringTypes = (str, unicode, UserString)
    # Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
        # Profiling shows an impressive speed-up of 2x when explicitly
        # checking for strings instead of just 'not sequence' when the
        # argument (i.e. obj) is already a string. But if obj is not a
        # string then it is twice as fast to check only for 'not
        # sequence'. The following code therefore assumes that the obj
        # argument is a string most of the time.
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
StringTypes=StringTypes, SequenceTypes=SequenceTypes):
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
return [obj]
result = []
for item in obj:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
result = []
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
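    # Example: strings count as scalars, so they are not exploded into
    # characters:
    #
    #   >>> flatten(['a', ['b', ('c', 'd')], 'e'])
    #   ['a', 'b', 'c', 'd', 'e']
    #   >>> flatten('abc')
    #   ['abc']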
#
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
isinstance=isinstance, str=str,
UserString=UserString, BaseStringTypes=BaseStringTypes):
if isinstance(s,BaseStringTypes):
# Early out when already a string!
return s
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_subst(s,
isinstance=isinstance, join=string.join, str=str, to_String=to_String,
BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
UserString=UserString):
# Note that the test cases are sorted by order of probability.
if isinstance(s, BaseStringTypes):
return s
elif isinstance(s, SequenceTypes):
l = []
for e in s:
l.append(to_String_for_subst(e))
            return join( l )
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
AttributeError=AttributeError):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy, which is used by the BuilderDict
# class because of its extra initialization argument.
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
_semi_deepcopy_dispatch = d = {}
def _semi_deepcopy_dict(x):
copy = {}
for key, val in x.items():
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
# copy[semi_deepcopy(key)] = semi_deepcopy(val)
#
# Doesn't seem like we need to, but we'll comment it just in case.
copy[key] = semi_deepcopy(val)
return copy
d[types.DictionaryType] = _semi_deepcopy_dict
def _semi_deepcopy_list(x):
return map(semi_deepcopy, x)
d[types.ListType] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
return tuple(map(semi_deepcopy, x))
d[types.TupleType] = _semi_deepcopy_tuple
def _semi_deepcopy_inst(x):
if hasattr(x, '__semi_deepcopy__'):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(_semi_deepcopy_dict(x))
elif isinstance(x, UserList):
return x.__class__(_semi_deepcopy_list(x))
else:
return x
d[types.InstanceType] = _semi_deepcopy_inst
def semi_deepcopy(x):
copier = _semi_deepcopy_dispatch.get(type(x))
if copier:
return copier(x)
else:
return x
class Proxy:
"""A simple generic Proxy class, forwarding all calls to
    subject. In other words, you can take an object, say 'objA', and
    wrap it in this Proxy class:

        proxyObj = Proxy(objA)

    If you later write

        x = proxyObj.var1

    then, since Proxy does not have a 'var1' attribute (but presumably
    objA does), the request is equivalent to

        x = objA.var1

    Inherit from this class to create a Proxy."""
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self.__subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object. If the named
attribute doesn't exist, AttributeError is raised"""
return getattr(self.__subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self.__subject
def __cmp__(self, other):
if issubclass(other.__class__, self.__subject.__class__):
return cmp(self.__subject, other)
return cmp(self.__dict__, other.__dict__)
# attempt to load the windows registry module:
can_read_reg = 0
try:
import _winreg
can_read_reg = 1
hkey_mod = _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegQueryValueEx = _winreg.QueryValueEx
RegError = _winreg.error
except ImportError:
try:
import win32api
import win32con
can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegQueryValueEx = win32api.RegQueryValueEx
RegError = win32api.error
except ImportError:
class _NoError(Exception):
pass
RegError = _NoError
if can_read_reg:
HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
HKEY_USERS = hkey_mod.HKEY_USERS
def RegGetValue(root, key):
"""This utility function returns a value in the registry
without having to open the key first. Only available on
Windows platforms with a version of Python that can read the
registry. Returns the same thing as
SCons.Util.RegQueryValueEx, except you just specify the entire
path to the value, and don't have to bother opening the key
first. So:
Instead of:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion')
out = SCons.Util.RegQueryValueEx(k,
'ProgramFilesDir')
You can write:
out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
"""
# I would use os.path.split here, but it's not a filesystem
# path...
p = key.rfind('\\') + 1
        keyp = key[:p-1] # -1 to omit trailing backslash
val = key[p:]
k = RegOpenKeyEx(root, keyp)
return RegQueryValueEx(k,val)
else:
try:
e = WindowsError
except NameError:
# Make sure we have a definition of WindowsError so we can
# run platform-independent tests of Windows functionality on
# platforms other than Windows. (WindowsError is, in fact, an
# OSError subclass on Windows.)
class WindowsError(OSError):
pass
import __builtin__
__builtin__.WindowsError = WindowsError
else:
del e
HKEY_CLASSES_ROOT = None
HKEY_LOCAL_MACHINE = None
HKEY_CURRENT_USER = None
HKEY_USERS = None
def RegGetValue(root, key):
raise WindowsError
def RegOpenKeyEx(root, key):
raise WindowsError
if sys.platform == 'win32':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
try:
pathext = os.environ['PATHEXT']
except KeyError:
pathext = '.COM;.EXE;.BAT;.CMD'
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
elif os.name == 'os2':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = ['.exe', '.cmd']
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
else:
def WhereIs(file, path=None, pathext=None, reject=[]):
import stat
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = string.split(path, os.pathsep)
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for d in path:
f = os.path.join(d, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
# os.stat() raises OSError, not IOError if the file
# doesn't exist, so in this case we let IOError get
# raised so as to not mask possibly serious disk or
# network issues.
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
try:
reject.index(f)
except ValueError:
return os.path.normpath(f)
continue
return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This prepends newpath elements to the given oldpath. Will only
add any particular path once (leaving the first one it encounters
and ignoring the rest, to preserve path order), and will
os.path.normpath and os.path.normcase all paths to help assure
this. This can also handle the case where the given old path
variable is a list instead of a string, in which case a list will
be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/biz/boom:/foo:/foo/bar"
If delete_existing is 0, then adding a path that exists will
not move it to the beginning; it will stay where it is in the
list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = string.split(paths, sep)
is_list = 0
if is_String(newpath):
newpaths = string.split(newpath, sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=map(canonicalize, newpaths)
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
else:
return string.join(paths, sep)
def AppendPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This appends new path elements to the given old path. Will
only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and
will os.path.normpath and os.path.normcase all paths to help
assure this. This can also handle the case where the given old
path variable is a list instead of a string, in which case a list
will be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/foo/bar:/biz/boom:/foo"
If delete_existing is 0, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = string.split(paths, sep)
is_list = 0
if is_String(newpath):
newpaths = string.split(newpath, sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=map(canonicalize, newpaths)
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
else:
return string.join(paths, sep)
if sys.platform == 'cygwin':
def get_native_path(path):
"""Transforms an absolute path into a native path for the system. In
Cygwin, this converts from a Cygwin path to a Windows one."""
return string.replace(os.popen('cygpath -w ' + path).read(), '\n', '')
else:
def get_native_path(path):
"""Transforms an absolute path into a native path for the system.
Non-Cygwin version, just leave the path alone."""
return path
display = DisplayEngine()
def Split(arg):
if is_List(arg) or is_Tuple(arg):
return arg
elif is_String(arg):
return string.split(arg)
else:
return [arg]
class CLVar(UserList):
"""A class for command-line construction variables.
This is a list that uses Split() to split an initial string along
white-space arguments, and similarly to split any strings that get
added. This allows us to Do the Right Thing with Append() and
Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
arg2') regardless of whether a user adds a list or a string to a
command-line construction variable.
"""
def __init__(self, seq = []):
UserList.__init__(self, Split(seq))
def __add__(self, other):
return UserList.__add__(self, CLVar(other))
def __radd__(self, other):
return UserList.__radd__(self, CLVar(other))
def __coerce__(self, other):
return (self, CLVar(other))
def __str__(self):
return string.join(self.data)
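# Example: CLVar splits strings on whitespace both at construction and on
# addition, so lists and strings mix cleanly:
#
#   >>> c = CLVar('-O2 -g')
#   >>> (c + '-Wall').data
#   ['-O2', '-g', '-Wall']
#   >>> str(c)
#   '-O2 -g'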
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
def __init__(self, dict = None):
self._keys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
dict = OrderedDict()
dict.update(self)
return dict
def items(self):
return zip(self._keys, self.values())
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
UserDict.setdefault(self, key, failobj)
if key not in self._keys: self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self.__setitem__(key, val)
def values(self):
return map(self.get, self._keys)
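# Example: iteration order follows insertion order, unlike a plain
# Python 2 dict:
#
#   >>> d = OrderedDict()
#   >>> d['b'] = 1
#   >>> d['a'] = 2
#   >>> d.keys()
#   ['b', 'a']
#   >>> d.items()
#   [('b', 1), ('a', 2)]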
class Selector(OrderedDict):
"""A callable ordered dictionary that maps file suffixes to
dictionary values. We preserve the order in which items are added
so that get_suffix() calls always return the first suffix added."""
def __call__(self, env, source, ext=None):
if ext is None:
try:
ext = source[0].suffix
except IndexError:
ext = ""
try:
return self[ext]
except KeyError:
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
for (k,v) in self.items():
if k is not None:
s_k = env.subst(k)
if s_dict.has_key(s_k):
# We only raise an error when variables point
# to the same suffix. If one suffix is literal
# and a variable suffix contains this literal,
# the literal wins and we don't raise an error.
raise KeyError, (s_dict[s_k][0], k, s_k)
s_dict[s_k] = (k,v)
try:
return s_dict[ext][1]
except KeyError:
try:
return self[None]
except KeyError:
return None
if sys.platform == 'cygwin':
# On Cygwin, os.path.normcase() lies, so just report back the
# fact that the underlying Windows OS is case-insensitive.
def case_sensitive_suffixes(s1, s2):
return 0
else:
def case_sensitive_suffixes(s1, s2):
return (os.path.normcase(s1) != os.path.normcase(s2))
def adjustixes(fname, pre, suf, ensure_suffix=False):
if pre:
path, fn = os.path.split(os.path.normpath(fname))
if fn[:len(pre)] != pre:
fname = os.path.join(path, pre + fn)
# Only append a suffix if the suffix we're going to add isn't already
# there, and if either we've been asked to ensure the specific suffix
# is present or there's no suffix on it at all.
if suf and fname[-len(suf):] != suf and \
(ensure_suffix or not splitext(fname)[1]):
fname = fname + suf
return fname
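# Example: the prefix is always enforced, but the suffix is only added
# when the name has no extension (or ensure_suffix is set):
#
#   >>> adjustixes('foo', 'lib', '.a')
#   'libfoo.a'
#   >>> adjustixes('foo.c', 'lib', '.a')
#   'libfoo.c'
#   >>> adjustixes('foo.c', 'lib', '.a', ensure_suffix=True)
#   'libfoo.c.a'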
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
pass # move on to the next method
else:
return u.keys()
del u
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
pass # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti = lasti + 1
i = i + 1
return t[:lasti]
del t
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
seen = {}
result = []
for item in seq:
#if not item in seen:
if not seen.has_key(item):
seen[item] = 1
result.append(item)
return result
# Much of the logic here was originally based on recipe 4.9 from the
# Python CookBook, but we had to dumb it way down for Python 1.5.2.
class LogicalLines:
def __init__(self, fileobj):
self.fileobj = fileobj
def readline(self):
result = []
while 1:
line = self.fileobj.readline()
if not line:
break
if line[-2:] == '\\\n':
result.append(line[:-2])
else:
result.append(line)
break
return string.join(result, '')
def readlines(self):
result = []
while 1:
line = self.readline()
if not line:
break
result.append(line)
return result
class UniqueList(UserList):
def __init__(self, seq = []):
UserList.__init__(self, seq)
self.unique = True
def __make_unique(self):
if not self.unique:
self.data = uniquer_hashables(self.data)
self.unique = True
def __lt__(self, other):
self.__make_unique()
return UserList.__lt__(self, other)
def __le__(self, other):
self.__make_unique()
return UserList.__le__(self, other)
def __eq__(self, other):
self.__make_unique()
return UserList.__eq__(self, other)
def __ne__(self, other):
self.__make_unique()
return UserList.__ne__(self, other)
def __gt__(self, other):
self.__make_unique()
return UserList.__gt__(self, other)
def __ge__(self, other):
self.__make_unique()
return UserList.__ge__(self, other)
def __cmp__(self, other):
self.__make_unique()
return UserList.__cmp__(self, other)
def __len__(self):
self.__make_unique()
return UserList.__len__(self)
def __getitem__(self, i):
self.__make_unique()
return UserList.__getitem__(self, i)
def __setitem__(self, i, item):
UserList.__setitem__(self, i, item)
self.unique = False
def __getslice__(self, i, j):
self.__make_unique()
return UserList.__getslice__(self, i, j)
def __setslice__(self, i, j, other):
UserList.__setslice__(self, i, j, other)
self.unique = False
def __add__(self, other):
result = UserList.__add__(self, other)
result.unique = False
return result
def __radd__(self, other):
result = UserList.__radd__(self, other)
result.unique = False
return result
def __iadd__(self, other):
result = UserList.__iadd__(self, other)
result.unique = False
return result
def __mul__(self, other):
result = UserList.__mul__(self, other)
result.unique = False
return result
def __rmul__(self, other):
result = UserList.__rmul__(self, other)
result.unique = False
return result
def __imul__(self, other):
result = UserList.__imul__(self, other)
result.unique = False
return result
def append(self, item):
UserList.append(self, item)
self.unique = False
    def insert(self, i, item):
        UserList.insert(self, i, item)
self.unique = False
def count(self, item):
self.__make_unique()
return UserList.count(self, item)
def index(self, item):
self.__make_unique()
return UserList.index(self, item)
def reverse(self):
self.__make_unique()
UserList.reverse(self)
def sort(self, *args, **kwds):
self.__make_unique()
#return UserList.sort(self, *args, **kwds)
return apply(UserList.sort, (self,)+args, kwds)
def extend(self, other):
UserList.extend(self, other)
self.unique = False
class Unbuffered:
"""
A proxy class that wraps a file object, flushing after every write,
and delegating everything else to the wrapped object.
"""
def __init__(self, file):
self.file = file
def write(self, arg):
try:
self.file.write(arg)
self.file.flush()
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
# IOError's here so that SCons can continue and shutdown
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def __getattr__(self, attr):
return getattr(self.file, attr)
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
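# Example: leading separators (and any drive letter) are stripped;
# relative paths pass through unchanged:
#
#   >>> make_path_relative('/usr/lib/python')
#   'usr/lib/python'
#   >>> make_path_relative('usr/lib/python')
#   'usr/lib/python'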
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(object, function, name = None):
"""
Adds either a bound method to an instance or an unbound method to
    a class. If name is omitted, the name of the specified function
is used by default.
Example:
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print a.z
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print a.listIndex(5)
"""
import new
if name is None:
name = function.func_name
else:
function = RenameFunction(function, name)
try:
klass = object.__class__
except AttributeError:
# "object" is really a class, so it gets an unbound method.
object.__dict__[name] = new.instancemethod(function, None, object)
else:
# "object" is really an instance, so it gets a bound method.
object.__dict__[name] = new.instancemethod(function, object, klass)
def RenameFunction(function, name):
"""
Returns a function identical to the specified function, but with
the specified name.
"""
import new
# Compatibility for Python 1.5 and 2.1. Can be removed in favor of
# passing function.func_defaults directly to new.function() once
# we base on Python 2.2 or later.
func_defaults = function.func_defaults
if func_defaults is None:
func_defaults = ()
return new.function(function.func_code,
function.func_globals,
name,
func_defaults)
md5 = False
def MD5signature(s):
return str(s)
def MD5filesignature(fname, chunksize=65536):
f = open(fname, "rb")
result = f.read()
f.close()
return result
try:
import hashlib
except ImportError:
pass
else:
if hasattr(hashlib, 'md5'):
md5 = True
def MD5signature(s):
m = hashlib.md5()
m.update(str(s))
return m.hexdigest()
def MD5filesignature(fname, chunksize=65536):
m = hashlib.md5()
f = open(fname, "rb")
while 1:
blck = f.read(chunksize)
if not blck:
break
m.update(str(blck))
f.close()
return m.hexdigest()
def MD5collect(signatures):
"""
Collects a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return MD5signature(string.join(signatures, ', '))
# Wrap the intern() function so it doesn't throw exceptions if ineligible
# arguments are passed. The intern() function was moved into the sys module in
# Python 3.
try:
intern
except NameError:
from sys import intern
def silent_intern(x):
"""
Perform intern() on the passed argument and return the result.
If the input is ineligible (e.g. a unicode string) the original argument is
returned and no exception is thrown.
"""
try:
return intern(x)
except TypeError:
return x
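# Example: eligible byte strings come back interned; ineligible values
# (e.g. a unicode string under Python 2) come back unchanged:
#
#   >>> silent_intern('foo') == 'foo'
#   True
#   >>> silent_intern(u'f\u00f6\u00f6') == u'f\u00f6\u00f6'
#   True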
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
# TODO(1.5):
#class Null(object):
class Null:
""" Null objects always and reliably "do nothing." """
def __new__(cls, *args, **kwargs):
if not '_inst' in vars(cls):
#cls._inst = type.__new__(cls, *args, **kwargs)
cls._inst = apply(type.__new__, (cls,) + args, kwargs)
return cls._inst
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
def __nonzero__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
class NullSeq(Null):
def __len__(self):
return 0
def __iter__(self):
return iter(())
def __getitem__(self, i):
return self
def __delitem__(self, i):
return self
def __setitem__(self, i, v):
return self
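# Example: a Null object silently absorbs any use and always tests false,
# which lets callers skip "is it None?" checks:
#
#   >>> n = Null()
#   >>> n.log.write('ignored') is n
#   True
#   >>> bool(n)
#   False
#   >>> Null() is n
#   True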
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bleepbloop/Pivy
|
scons/scons-local-1.2.0.d20090919/SCons/Util.py
|
Python
|
isc
| 53,960
|
[
"VisIt"
] |
8a53385b24539aaf0c14b70b30057b400f5bd9ecd1f7159af912d04ad5e9c915
|
from __future__ import division
import numpy as np
from ase.constraints import FixConstraint, Hookean, slice2enlist
from ase.utils.geometry import find_mic
class SlowGrowthBondLength(FixConstraint):
"""Constraint object for fixing a bond length."""
def __init__(self, a1, a2, bond_speed=0., direction=None):
"""Linearly tweak distance between atoms with indices a1 and a2. If mic is
True, follows the minimum image convention to keep constant the
shortest distance between a1 and a2 in any periodic direction.
atoms only needs to be supplied if mic=True.
Bond speed is measured in Angstrom per timestep
"""
self.indices = [a1, a2]
self.constraint_force = None
self.bond_speed = bond_speed
# self.fourindices = None
        self.adjust = False
        if direction is None:
            self.direction = None  # standard distance constraint
        elif len(direction) == 2:
            self.adjust = True
            self.direction = 'adjust'  # self-adjusting direction of constraint
        else:
            # fixed direction of constraint
            self.direction = np.array(direction) / np.linalg.norm(direction)
def adjust_direction(self, atoms):
if not self.adjust:
return
p1, p2 = atoms.positions[self.indices]
direction, _ = find_mic(np.array([p2 - p1]), atoms._cell, pbc=False)
direction = direction[0]
direction[2] = 0.
distance = np.linalg.norm(direction)
direction /= distance
self.direction = direction
self.distance = distance
def adjust_positions(self, atoms, new):
p1, p2 = atoms.positions[self.indices]
d_old, p_old = find_mic(np.array([p2 - p1]), atoms._cell, pbc=False)
d_old = d_old[0]
q1, q2 = new[self.indices]
d_new, p_new = find_mic(np.array([q2 - q1]), atoms._cell, pbc=False)
d_new = d_new[0]
if self.direction is None:
# amount to put back distance to old value
delta_d = 0.5 * d_new * (p_old - p_new) / p_new
# desired increase amount
sbit = 0.5 * d_new / p_new * self.bond_speed
else:
self.adjust_direction(atoms)
p_old = np.abs(np.dot(d_old, self.direction))
p_new = np.dot(d_new, self.direction)
swapsign = np.sign(p_new)
length_new = np.abs(p_new)
# amount to put back distance to old value by resizing distance vector d_new
delta_d = 0.5 * (p_old - p_new) * self.direction * swapsign
# desired increase amount
sbit = 0.5 * self.direction * self.bond_speed * swapsign
new[self.indices] = (q1 - delta_d - sbit, q2 + delta_d + sbit)
self.distance = p_old + self.bond_speed
def adjust_forces(self, atoms, forces):
d = np.subtract.reduce(atoms.positions[self.indices])
d, p = find_mic(np.array([d]), atoms._cell, pbc=False)
d = d[0]
if self.direction is None:
f = 0.5 * d * np.dot(np.subtract.reduce(forces[self.indices]), d) / p**2
else:
if self.adjust:
self.adjust_direction(atoms)
f = 0.5 * np.dot(np.subtract.reduce(forces[self.indices]), self.direction) * self.direction
else:
f = 0.5 * np.dot(np.subtract.reduce(forces[self.indices]), self.direction) * np.sign(np.dot(d, self.direction)) * self.direction
self.constraint_force = f
forces[self.indices] += (-f, f)
def index_shuffle(self, atoms, ind):
"""Shuffle the indices of the two atoms in this constraint"""
newa = [-1, -1] # Signal error
for new, old in slice2enlist(ind, len(atoms)):
for i, a in enumerate(self.indices):
if old == a:
newa[i] = new
if newa[0] == -1 or newa[1] == -1:
raise IndexError('Constraint not part of slice')
self.indices = newa
def get_constraint_force(self, atoms=None):
"""Return the (scalar) force required to maintain the constraint"""
return self.constraint_force
    def __repr__(self):
        return 'SlowGrowthBondLength(%d, %d)' % tuple(self.indices)
def todict(self):
return {'name': 'FixBondLength',
'kwargs': {'a1': self.indices[0], 'a2': self.indices[1]}}
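# Hedged usage sketch: attaching the constraint for a slow-growth MD run.
# The Atoms object, calculator and dynamics setup below are illustrative.
#   from ase.md.verlet import VelocityVerlet
#   from ase import units
#   constraint = SlowGrowthBondLength(0, 1, bond_speed=1e-3)
#   atoms.set_constraint(constraint)
#   dyn = VelocityVerlet(atoms, timestep=1.0 * units.fs)
#   dyn.run(1000)   # the 0-1 distance grows by ~1 Angstrom over the run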
# EDITED VERSION OF Hookean CONSTRAINT (ase.constraint.Hookean)
def adjust_potential_energy(self, atoms):
"""Returns the difference to the potential energy due to an active
constraint. (That is, the quantity returned is to be added to the
potential energy.)"""
positions = atoms.positions
if self._type == 'plane':
A, B, C, D = self.plane
x, y, z = positions[self.index]
d = ((A * x + B * y + C * z + D) /
np.sqrt(A**2 + B**2 + C**2))
return 0.5 * self.spring * d**2
if self._type == 'two atoms':
p1, p2 = positions[self.indices]
elif self._type == 'point':
p1 = positions[self.index]
p2 = self.origin
displace = p2 - p1
bondlength = np.linalg.norm(displace)
if bondlength > self.threshold:
return 0.5 * self.spring * (bondlength - self.threshold)**2
else:
return 0.
def adjust_forces(self, atoms, forces):
positions = atoms.positions
if self._type == 'plane':
A, B, C, D = self.plane
x, y, z = positions[self.index]
d = ((A * x + B * y + C * z + D) /
np.sqrt(A**2 + B**2 + C**2))
magnitude = self.spring * d
direction = - np.array((A, B, C)) / np.linalg.norm((A, B, C))
forces[self.index] += direction * magnitude
return
if self._type == 'two atoms':
p1, p2 = positions[self.indices]
elif self._type == 'point':
p1 = positions[self.index]
p2 = self.origin
displace = p2 - p1
bondlength = np.linalg.norm(displace)
if bondlength > self.threshold:
magnitude = self.spring * (bondlength - self.threshold)
direction = displace / np.linalg.norm(displace)
if self._type == 'two atoms':
forces[self.indices[0]] += direction * magnitude
forces[self.indices[1]] -= direction * magnitude
else:
forces[self.index] += direction * magnitude
setattr(Hookean, 'adjust_potential_energy', adjust_potential_energy)
setattr(Hookean, 'adjust_forces', adjust_forces)
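# After the setattr calls above, every Hookean instance uses the edited
# methods; a hedged example (spring constant and threshold are illustrative):
#   c = Hookean(a1=3, a2=4, k=5.0, rt=2.5)   # restrain atoms 3 and 4
#   atoms.set_constraint(c)   # energy/forces now come from the patched code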
|
marcocaccin/MLTI
|
step1_TI/ase_addon.py
|
Python
|
gpl-3.0
| 6,689
|
[
"ASE"
] |
bb886490bb9dbf1a4288bcf31c88edc160b328603edb752352e62585a890038d
|
import numpy as np
from gpaw import debug, dry_run
from gpaw.mpi import world, serial_comm, _Communicator, \
SerialCommunicator, DryRunCommunicator
even_comm = world.new_communicator(np.arange(0, world.size, 2))
if world.size > 1:
odd_comm = world.new_communicator(np.arange(1, world.size, 2))
else:
odd_comm = None
if world.rank % 2 == 0:
assert odd_comm is None
comm = even_comm
else:
assert even_comm is None
comm = odd_comm
hasmpi = False
try:
import _gpaw
hasmpi = hasattr(_gpaw, 'Communicator')
except (ImportError, AttributeError):
pass
assert world.parent is None
assert comm.parent is world
if hasmpi:
assert comm.parent.get_c_object() is world.get_c_object()
assert comm.get_c_object().parent is world.get_c_object()
commranks = np.arange(world.rank % 2, world.size, 2)
assert np.all(comm.get_members() == commranks)
assert comm.get_members()[comm.rank] == world.rank
subcomm = comm.new_communicator(np.array([comm.rank]))
assert subcomm.parent is comm
assert subcomm.rank == 0 and subcomm.size == 1
assert subcomm.get_members().item() == comm.rank
if debug:
assert isinstance(world, _Communicator)
assert isinstance(comm, _Communicator)
assert isinstance(subcomm, _Communicator)
elif world is serial_comm:
assert isinstance(world, SerialCommunicator)
assert isinstance(comm, SerialCommunicator)
assert isinstance(subcomm, SerialCommunicator)
elif hasmpi:
assert isinstance(world, _gpaw.Communicator)
assert isinstance(comm, _gpaw.Communicator)
assert isinstance(subcomm, _gpaw.Communicator)
|
robwarm/gpaw-symm
|
gpaw/test/mpicomm.py
|
Python
|
gpl-3.0
| 1,609
|
[
"GPAW"
] |
1ae308dc464591f421eec74cca24a574b58f2c61e5732f7fa8e6ae9ae8f7dc0c
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
from shutil import copy
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
copy("LICENSE.txt", "bokeh/")
package_data = ['LICENSE.txt']
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
def size(*path):
return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, '_templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'six>=1.5.2',
'requests>=1.2.3',
'PyYAML>=3.10',
'python-dateutil>=2.1',
'Jinja2>=2.7',
'numpy>=1.7.1',
'pandas>=0.11.0',
'Flask>=0.10.1',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
]
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
# Horrible hack: workaround to allow creation of bdist_wheel on pip installation
# Why, for God's sake, is pip forcing the generation of wheels when installing a package?
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError as e:
    # pip does not ask for bdist_wheel when wheel is not installed
bdist_wheel = None
if bdist_wheel is not None:
_cmdclass["bdist_wheel"] = bdist_wheel
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builders',
'bokeh.charts.builders.tests',
'bokeh.charts.tests',
'bokeh._legacy_charts',
'bokeh._legacy_charts.builder',
'bokeh._legacy_charts.builder.tests',
'bokeh._legacy_charts.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.storage',
'bokeh.server.tests',
'bokeh.server.utils',
'bokeh.server.views',
'bokeh.server.websocket',
'bokeh.server.zmq',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='info@continuum.io',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
|
ChinaQuants/bokeh
|
setup.py
|
Python
|
bsd-3-clause
| 19,865
|
[
"GULP"
] |
4d71566742bad4e900ff91e4ead261ea48cffab83e3a2e7a11139f64681af237
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def activation_statistics(init_func=lambda fan_in, fan_out: np.random.randn(fan_in, fan_out) * 0.001, nonlinearity='tanh'):
"""TODO: Docstring for activation_statistics.
Demonstrate activation statistics with different weight initialization
:init_func: TODO
:returns: TODO
"""
    # assume unit-gaussian 500-D input data
D = np.random.randn(1000, 500)
hidden_layer_sizes = [500]*10
nonlinearities = [nonlinearity]*len(hidden_layer_sizes)
activate_func = {
'relu': lambda x: np.maximum(0,x),
'tanh': lambda x: np.tanh(x),
}
Hs = {}
for i in range(len(hidden_layer_sizes)):
X = D if i == 0 else Hs[i-1] # input at this layer
fan_in = X.shape[1]
fan_out = hidden_layer_sizes[i]
W = init_func(fan_in, fan_out) # layer initialization
H = np.dot(X, W) # matrix multiply
H = activate_func[nonlinearities[i]](H) # nonlinearities
Hs[i] = H # cache result on this layer
# look at the distribution at each layer
    print('input layer had mean %f and std %f' % (np.mean(D), np.std(D)))
    layer_means = [np.mean(H) for i, H in Hs.items()]
    layer_stds = [np.std(H) for i, H in Hs.items()]
    for i, H in Hs.items():
        print('hidden layer %d had mean %f and std %f'
              % (i + 1, layer_means[i], layer_stds[i]))
# plot the means and standard deviations
plt.figure()
plt.subplot(121)
plt.plot(list(Hs.keys()), layer_means, 'ob-')
plt.title('layer mean')
plt.subplot(122)
plt.plot(list(Hs.keys()), layer_stds, 'or-')
plt.title('layer std')
# plot the raw distribution
plt.figure()
for i,H in Hs.items():
        plt.subplot(2, len(Hs) // 2, i + 1)
plt.hist(H.ravel(), 30, range=(-1,1,))
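if __name__ == '__main__':
    # Hedged demo: compare the naive 0.001-scaled init above against Xavier
    # initialization (1/sqrt(fan_in)); with tanh, the former collapses the
    # activation std towards zero layer by layer, the latter keeps it stable.
    activation_statistics(lambda fan_in, fan_out:
                          np.random.randn(fan_in, fan_out) * 0.001, 'tanh')
    activation_statistics(lambda fan_in, fan_out:
                          np.random.randn(fan_in, fan_out) / np.sqrt(fan_in),
                          'tanh')
    plt.show()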
|
Alexoner/skynet
|
skynet/neural_network/activation_statistics.py
|
Python
|
mit
| 1,860
|
[
"Gaussian"
] |
e5754793ee57247ba2fb8259e0697cf488f40071d539bf692dedc08d5af71a69
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import uuid
import logging
from typing import Mapping
import networkx as nx
from copy import deepcopy, copy
import pybel
import pybel.constants as pc
from pybel.dsl import *
# This is redundant but needed for documentation build
from pybel.dsl import Entity
from pybel.language import pmod_mappings, pmod_namespace, activity_mapping
try: # this works after pybel pull request #453
from pybel.language import citation_dict
except ImportError: # this works before pybel pull request #453
from pybel.utils import citation_dict
from indra.statements import *
from indra.databases import hgnc_client
logger = logging.getLogger(__name__)
_indra_pybel_act_map: Mapping[str, Entity] = {
'activity': activity_mapping['act'],
'kinase': activity_mapping['kin'],
'phosphatase': activity_mapping['phos'],
'catalytic': activity_mapping['cat'],
'gtpbound': activity_mapping['gtp'],
'transcription': activity_mapping['tscript'],
'gef': activity_mapping['gef'],
'gap': activity_mapping['gap'],
}
_pybel_indra_act_map: Mapping[Entity, str] = {
pybel_activity: indra_key
for indra_key, pybel_activity in _indra_pybel_act_map.items()
}
class PybelAssembler(object):
"""Assembles a PyBEL graph from a set of INDRA Statements.
PyBEL tools can subsequently be used to export the PyBEL graph into BEL
script files, SIF files, and other related output formats.
Parameters
----------
stmts : list[:py:class:`indra.statement.Statement`]
The list of Statements to assemble.
name : str
Name of the assembled PyBEL network.
description : str
Description of the assembled PyBEL network.
version : str
Version of the assembled PyBEL network.
authors : str
Author(s) of the network.
contact : str
Contact information (email) of the responsible author.
license : str
License information for the network.
copyright : str
Copyright information for the network.
disclaimer : str
Any disclaimers for the network.
Examples
--------
>>> from indra.statements import *
>>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
>>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
>>> stmt = Phosphorylation(map2k1, mapk1, 'T', '185')
>>> pba = PybelAssembler([stmt])
>>> belgraph = pba.make_model()
>>> sorted(node.as_bel() for node in belgraph) # doctest:+IGNORE_UNICODE
['p(HGNC:6840 ! MAP2K1)', 'p(HGNC:6871 ! MAPK1)', 'p(HGNC:6871 ! MAPK1, pmod(go:0006468 ! "protein phosphorylation", Thr, 185))']
>>> len(belgraph)
3
>>> belgraph.number_of_edges()
2
"""
def __init__(self, stmts=None, name=None, description=None, version=None,
authors=None, contact=None, license=None, copyright=None,
disclaimer=None):
if stmts is None:
self.statements = []
else:
self.statements = stmts
if name is None:
name = 'indra'
if version is None:
version = str(uuid.uuid4())
# Create the model and assign metadata
self.model = pybel.BELGraph(
name=name,
description=description,
version=version,
authors=authors,
contact=contact,
license=license,
copyright=copyright,
disclaimer=disclaimer,
)
ns_dict = {
'HGNC': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/hgnc-human-genes/hgnc-human-genes-20170725.belns',
'UP': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/swissprot/swissprot-20170725.belns',
'IP': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/interpro/interpro-20170731.belns',
'FPLX': 'https://raw.githubusercontent.com/sorgerlab/famplex/'
'5f5b573fe26d7405dbccb711ae8e5697b6a3ec7e/export/famplex.belns',
#'PFAM':
#'NXPFA':
'CHEBI': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/chebi-ids/chebi-ids-20170725.belns',
'GO': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/go/go-20180109.belns',
'MESH': 'https://arty.scai.fraunhofer.de/artifactory/bel/'
'namespace/mesh-processes/mesh-processes-20170725.belns'
}
self.model.namespace_url.update(ns_dict)
        self.model.namespace_pattern['PUBCHEM'] = r'\d+'
def add_statements(self, stmts_to_add):
self.statements += stmts_to_add
def make_model(self):
for stmt in self.statements:
# Skip statements with no subject
if stmt.agent_list()[0] is None and \
not isinstance(stmt, Conversion):
continue
# Assemble statements
if isinstance(stmt, Modification):
self._assemble_modification(stmt)
elif isinstance(stmt, RegulateActivity):
self._assemble_regulate_activity(stmt)
elif isinstance(stmt, RegulateAmount):
self._assemble_regulate_amount(stmt)
elif isinstance(stmt, Gef):
self._assemble_gef(stmt)
elif isinstance(stmt, Gap):
self._assemble_gap(stmt)
elif isinstance(stmt, ActiveForm):
self._assemble_active_form(stmt)
elif isinstance(stmt, Complex):
self._assemble_complex(stmt)
elif isinstance(stmt, Conversion):
self._assemble_conversion(stmt)
elif isinstance(stmt, Autophosphorylation):
self._assemble_autophosphorylation(stmt)
elif isinstance(stmt, Transphosphorylation):
self._assemble_transphosphorylation(stmt)
else:
logger.info('Unhandled statement: %s' % stmt)
return self.model
def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network
def to_web(self, host=None, user=None, password=None):
"""Send the model to BEL Commons by wrapping :py:func:`pybel.to_web`
The parameters ``host``, ``user``, and ``password`` all check the
PyBEL configuration, which is located at
``~/.config/pybel/config.json`` by default
Parameters
----------
host : Optional[str]
The host name to use. If none, first checks the PyBEL
configuration entry ``PYBEL_REMOTE_HOST``, then the
environment variable ``PYBEL_REMOTE_HOST``. Finally, defaults
to https://bel-commons.scai.fraunhofer.de.
user : Optional[str]
The username (email) to use. If none, first checks the
PyBEL configuration entry ``PYBEL_REMOTE_USER``,
then the environment variable ``PYBEL_REMOTE_USER``.
password : Optional[str]
The password to use. If none, first checks the PyBEL configuration
entry ``PYBEL_REMOTE_PASSWORD``, then the environment variable
``PYBEL_REMOTE_PASSWORD``.
Returns
-------
response : requests.Response
The response from the BEL Commons network upload endpoint.
"""
response = pybel.to_web(self.model, host=host, user=user,
password=password)
return response
def save_model(self, path, output_format=None):
"""Save the :class:`pybel.BELGraph` using one of the outputs from
:py:mod:`pybel`
Parameters
----------
path : str
The path to output to
output_format : Optional[str]
Output format as ``cx``, ``pickle``, ``json`` or defaults to ``bel``
"""
if output_format == 'pickle':
pybel.to_pickle(self.model, path)
else:
with open(path, 'w') as fh:
if output_format == 'json':
pybel.to_nodelink_file(self.model, fh)
elif output_format == 'cx':
pybel.to_cx_file(self.model, fh)
else: # output_format == 'bel':
pybel.to_bel_script(self.model, fh)
def _add_nodes_edges(self, subj_agent, obj_agent, relation, stmt):
"""Given subj/obj agents, relation, and evidence, add nodes/edges."""
subj_data, subj_edge = _get_agent_node(subj_agent)
obj_data, obj_edge = _get_agent_node(obj_agent)
# If we failed to create nodes for subject or object, skip it
if subj_data is None or obj_data is None:
return
self.model.add_node_from_data(subj_data)
self.model.add_node_from_data(obj_data)
edge_data_list = _combine_edge_data(
relation=relation,
subj_edge=subj_edge,
obj_edge=obj_edge,
stmt=stmt,
)
for edge_data in edge_data_list:
self.model.add_edge(subj_data, obj_data, **edge_data)
def _assemble_regulate_activity(self, stmt):
"""Example: p(HGNC:MAP2K1) => act(p(HGNC:MAPK1))"""
act_obj = deepcopy(stmt.obj)
act_obj.activity = stmt._get_activity_condition()
# We set is_active to True here since the polarity is encoded
# in the edge (decreases/increases)
act_obj.activity.is_active = True
activates = isinstance(stmt, Activation)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, act_obj, relation, stmt)
def _assemble_modification(self, stmt):
"""Example: p(HGNC:MAP2K1) => p(HGNC:MAPK1, pmod(Ph, Thr, 185))"""
sub_agent = deepcopy(stmt.sub)
sub_agent.mods.append(stmt._get_mod_condition())
activates = isinstance(stmt, AddModification)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.enz, sub_agent, relation, stmt)
def _assemble_regulate_amount(self, stmt):
"""Example: p(HGNC:ELK1) => p(HGNC:FOS)"""
activates = isinstance(stmt, IncreaseAmount)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, stmt.obj, relation, stmt)
def _assemble_gef(self, stmt):
"""Example: act(p(HGNC:SOS1), ma(gef)) => act(p(HGNC:KRAS), ma(gtp))"""
gef = deepcopy(stmt.gef)
gef.activity = ActivityCondition('gef', True)
ras = deepcopy(stmt.ras)
ras.activity = ActivityCondition('gtpbound', True)
self._add_nodes_edges(gef, ras, pc.DIRECTLY_INCREASES, stmt)
def _assemble_gap(self, stmt):
"""Example: act(p(HGNC:RASA1), ma(gap)) =| act(p(HGNC:KRAS), ma(gtp))"""
gap = deepcopy(stmt.gap)
gap.activity = ActivityCondition('gap', True)
ras = deepcopy(stmt.ras)
ras.activity = ActivityCondition('gtpbound', True)
self._add_nodes_edges(gap, ras, pc.DIRECTLY_DECREASES, stmt)
def _assemble_active_form(self, stmt):
"""Example: p(HGNC:ELK1, pmod(Ph)) => act(p(HGNC:ELK1), ma(tscript))"""
act_agent = Agent(stmt.agent.name, db_refs=stmt.agent.db_refs)
act_agent.activity = ActivityCondition(stmt.activity, True)
activates = stmt.is_active
relation = get_causal_edge(stmt, activates)
if not stmt.agent.mods and not stmt.agent.bound_conditions and \
not stmt.agent.mutations:
self._add_nodes_edges(stmt.agent, act_agent, relation, stmt)
else:
for mod in stmt.agent.mods:
mod_agent = Agent(
stmt.agent.name, db_refs=stmt.agent.db_refs, mods=[mod])
self._add_nodes_edges(mod_agent, act_agent, relation, stmt)
for bc in stmt.agent.bound_conditions:
bound_agent = Agent(
stmt.agent.name, db_refs=stmt.agent.db_refs,
bound_conditions=[bc])
self._add_nodes_edges(bound_agent, act_agent, relation, stmt)
for mut in stmt.agent.mutations:
mut_agent = Agent(
stmt.agent.name, db_refs=stmt.agent.db_refs,
mutations=[mut])
self._add_nodes_edges(mut_agent, act_agent, relation, stmt)
def _assemble_complex(self, stmt):
"""Example: complex(p(HGNC:MAPK14), p(HGNC:TAB1))"""
complex_data, _ = _get_complex_node(stmt.members)
if complex_data is None:
logger.info('skip adding complex with no members: %s', stmt.members)
return
self.model.add_node_from_data(complex_data)
def _assemble_conversion(self, stmt):
"""Example: p(HGNC:HK1) => rxn(reactants(a(CHEBI:"CHEBI:17634")),
products(a(CHEBI:"CHEBI:4170")))"""
pybel_lists = ([], [])
for pybel_list, agent_list in \
zip(pybel_lists, (stmt.obj_from, stmt.obj_to)):
for agent in agent_list:
node = _get_agent_grounding(agent)
# TODO check for missing grounding?
pybel_list.append(node)
rxn_node_data = reaction(
reactants=pybel_lists[0],
products=pybel_lists[1],
)
self.model.add_node_from_data(rxn_node_data)
obj_edge = None # TODO: Any edge information possible here?
# Add node for controller, if there is one
if stmt.subj is not None:
subj_attr, subj_edge = _get_agent_node(stmt.subj)
self.model.add_node_from_data(subj_attr)
edge_data_list = _combine_edge_data(
relation=pc.DIRECTLY_INCREASES,
subj_edge=subj_edge,
obj_edge=obj_edge,
stmt=stmt,
)
for edge_data in edge_data_list:
self.model.add_edge(subj_attr, rxn_node_data, **edge_data)
def _assemble_autophosphorylation(self, stmt):
"""Example: complex(p(HGNC:MAPK14), p(HGNC:TAB1)) =>
p(HGNC:MAPK14, pmod(Ph, Tyr, 100))"""
sub_agent = deepcopy(stmt.enz)
mc = stmt._get_mod_condition()
sub_agent.mods.append(mc)
# FIXME Ignore any bound conditions on the substrate!!!
# This is because if they are included, a complex node will be returned,
# which (at least currently) won't incorporate any protein
# modifications.
sub_agent.bound_conditions = []
# FIXME
self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES, stmt)
def _assemble_transphosphorylation(self, stmt):
"""Example: complex(p(HGNC:EGFR)) =>
p(HGNC:EGFR, pmod(Ph, Tyr, 1173))"""
# Check our assumptions about the bound condition of the enzyme
assert len(stmt.enz.bound_conditions) == 1
assert stmt.enz.bound_conditions[0].is_bound
# Create a modified protein node for the bound target
sub_agent = deepcopy(stmt.enz.bound_conditions[0].agent)
sub_agent.mods.append(stmt._get_mod_condition())
self._add_nodes_edges(stmt.enz, sub_agent, pc.DIRECTLY_INCREASES, stmt)
def _assemble_translocation(self, stmt):
pass
def belgraph_to_signed_graph(
        belgraph, include_variants=True, symmetric_variant_links=False,
        include_components=True, symmetric_component_links=False,
        propagate_annotations=False):
    """Convert a PyBEL graph into a signed networkx MultiDiGraph.

    Causal increases become edges with sign 0 and causal decreases sign 1;
    variant and component links are optionally included (symmetrically if
    requested), and edge annotations can be propagated from the BEL graph.
    """
def get_ns(n):
# For nodes containing several agents (complex abundance or reaction)
# return namespace of the first member
if isinstance(n, complex_abundance):
return get_ns(n.members[0])
if isinstance(n, reaction):
return get_ns(n.products[0])
return n.namespace
graph = nx.MultiDiGraph()
for n in belgraph.nodes:
graph.add_node(n, ns=get_ns(n))
edge_set = set()
for u, v, edge_data in belgraph.edges(data=True):
rel = edge_data.get('relation')
pos_edge = \
(u, v, ('sign', 0)) + \
tuple((key, tuple(entry)) if len(entry) > 1 else tuple(
(key, *tuple(entry)))
for key, entry in edge_data.get('annotations', {}).items()) \
if propagate_annotations else (u, v, ('sign', 0))
# Unpack tuple pairs at indices >1 or they'll be in nested tuples
rev_pos_edge = (pos_edge[1], pos_edge[0], *pos_edge[2:])
if rel in pc.CAUSAL_INCREASE_RELATIONS:
edge_set.add(pos_edge)
elif rel in pc.HAS_VARIANT and include_variants:
edge_set.add(pos_edge)
if symmetric_variant_links:
edge_set.add(rev_pos_edge)
elif rel in pc.PART_OF and include_components:
edge_set.add(pos_edge)
if symmetric_component_links:
edge_set.add(rev_pos_edge)
elif rel in pc.CAUSAL_DECREASE_RELATIONS:
# Unpack tuples
edge_set.add((pos_edge[0], pos_edge[1],
('sign', 1), *pos_edge[3:]))
else:
continue
graph.add_edges_from((t[0], t[1], dict(t[2:])) for t in edge_set)
return graph
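# Hedged usage sketch: converting an assembled BEL graph into the signed
# networkx graph (edges carry 'sign' 0 for increases, 1 for decreases):
#   pba = PybelAssembler(stmts)        # stmts: a list of INDRA Statements
#   signed = belgraph_to_signed_graph(pba.make_model(),
#                                     propagate_annotations=True)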
def _combine_edge_data(relation, subj_edge, obj_edge, stmt):
edge_data = {
pc.RELATION: relation,
pc.ANNOTATIONS: _get_annotations_from_stmt(stmt),
}
if subj_edge:
edge_data[pc.SUBJECT] = subj_edge
if obj_edge:
edge_data[pc.OBJECT] = obj_edge
if not stmt.evidence:
return [edge_data]
return [
_update_edge_data_from_evidence(evidence, edge_data)
for evidence in stmt.evidence
]
def _update_edge_data_from_evidence(evidence, edge_data):
edge_data_one = copy(edge_data)
citation, evidence, annotations = _get_evidence(evidence)
edge_data_one.update({
pc.CITATION: citation,
pc.EVIDENCE: evidence,
})
edge_data_one[pc.ANNOTATIONS].update(annotations)
return edge_data_one
def _get_annotations_from_stmt(stmt):
return {
'stmt_hash': {stmt.get_hash(refresh=True): True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
}
def _get_agent_node(agent):
if not agent.bound_conditions:
return _get_agent_node_no_bcs(agent)
# Check if bound conditions are bound to agent
bound_conditions = [
bc.agent for bc in agent.bound_conditions if bc.is_bound]
if not bound_conditions:
return _get_agent_node_no_bcs(agent)
# "Flatten" the bound conditions for the agent at this level
agent_no_bc = deepcopy(agent)
agent_no_bc.bound_conditions = []
members = [agent_no_bc] + bound_conditions
return _get_complex_node(members)
def _get_complex_node(members):
members_list = []
for member in members:
member_data, member_edge = _get_agent_node(member)
if member_data:
members_list.append(member_data)
if members_list:
complex_node_data = complex_abundance(members=members_list)
return complex_node_data, None
return None, None
def _get_agent_node_no_bcs(agent):
node_data = _get_agent_grounding(agent)
if node_data is None:
logger.warning('Agent %s has no grounding.', agent)
return None, None
variants = []
for mod in agent.mods:
pybel_mod = pmod_namespace.get(mod.mod_type)
if not pybel_mod:
logger.info('Skipping modification of type %s on agent %s',
mod.mod_type, agent)
continue
pmod_entity = pmod_mappings[pybel_mod]['xrefs'][0]
var = ProteinModification(
namespace=pmod_entity.namespace,
name=pmod_entity.name,
identifier=pmod_entity.identifier,
)
if mod.residue is not None:
res = amino_acids[mod.residue]['short_name'].capitalize()
var[pc.PMOD_CODE] = res
if mod.position is not None:
var[pc.PMOD_POSITION] = int(mod.position)
variants.append(var)
for mut in agent.mutations:
var = hgvs(mut.to_hgvs())
variants.append(var)
if variants and not isinstance(node_data, CentralDogma):
logger.warning('Node should not have variants: %s, %s', node_data,
variants)
elif variants:
node_data = node_data.with_variants(variants)
if isinstance(node_data, (bioprocess, pathology)):
return node_data, None
# Also get edge data for the agent
edge_data = _get_agent_activity(agent)
return node_data, edge_data
def _get_agent_grounding(agent):
"""Convert an agent to the corresponding PyBEL DSL object (to be filled
with variants later)."""
def _get_id(_agent, key):
_id = _agent.db_refs.get(key)
if isinstance(_id, list):
_id = _id[0]
return _id
hgnc_id = _get_id(agent, 'HGNC')
if hgnc_id:
hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
if not hgnc_name:
logger.warning('Agent %s with HGNC ID %s has no HGNC name.',
agent, hgnc_id)
return
return protein('HGNC', name=hgnc_name, identifier=hgnc_id)
uniprot_id = _get_id(agent, 'UP')
if uniprot_id:
return protein('UP', name=uniprot_id, identifier=uniprot_id)
fplx_id = _get_id(agent, 'FPLX')
if fplx_id:
return protein('FPLX', name=fplx_id, identifier=fplx_id)
pfam_id = _get_id(agent, 'PF')
if pfam_id:
return protein('PFAM', name=agent.name, identifier=pfam_id)
ip_id = _get_id(agent, 'IP')
if ip_id:
return protein('IP', ip_id)
fa_id = _get_id(agent, 'FA')
if fa_id:
return protein('NXPFA', fa_id)
chebi_id = _get_id(agent, 'CHEBI')
if chebi_id:
if chebi_id.startswith('CHEBI:'):
chebi_id = chebi_id[len('CHEBI:'):]
return abundance('CHEBI', name=agent.name, identifier=chebi_id)
pubchem_id = _get_id(agent, 'PUBCHEM')
if pubchem_id:
return abundance('PUBCHEM', name=pubchem_id, identifier=pubchem_id)
go_id = _get_id(agent, 'GO')
if go_id:
return bioprocess('GO', name=agent.name, identifier=go_id)
mesh_id = _get_id(agent, 'MESH')
if mesh_id:
return bioprocess('MESH', name=agent.name, identifier=mesh_id)
return abundance('TEXT', name=agent.name)
def _get_agent_activity(agent):
ac = agent.activity
if not ac:
return None
if not ac.is_active:
logger.warning('Cannot represent negative activity in PyBEL: %s' %
agent)
if ac.activity_type == 'activity':
return activity()
return activity(**_indra_pybel_act_map[ac.activity_type])
def _get_evidence(evidence):
text = evidence.text if evidence.text else 'No evidence text.'
# If there is a PMID, use it as the citation
if evidence.pmid:
citation = citation_dict(
namespace=pc.CITATION_TYPE_PUBMED,
identifier=evidence.pmid,
)
    # If no PMID, use the source_api and source_id as the citation for now --
    # in general this should probably be in the annotations for all evidence
else:
cit_source = evidence.source_api or 'Unknown'
cit_id = evidence.source_id or 'Unknown'
cit_ref_str = '%s:%s' % (cit_source, cit_id)
citation = citation_dict(
namespace=pc.CITATION_TYPE_OTHER,
identifier=cit_ref_str,
)
annotations = {
'source_hash': {evidence.get_source_hash(): True},
}
if evidence.source_api:
annotations['source_api'] = {evidence.source_api: True}
if evidence.source_id:
annotations['source_id'] = {evidence.source_id: True}
for key, value in evidence.epistemics.items():
if key == 'direct' or value is None:
continue
if isinstance(value, (list, set, tuple)):
annotations[key] = {v: True for v in value}
else:
annotations[key] = {value: True}
return citation, text, annotations
def get_causal_edge(stmt, activates):
"""Returns the causal, polar edge with the correct "contact"."""
any_contact = any(
evidence.epistemics.get('direct', False)
for evidence in stmt.evidence
)
if any_contact:
return pc.DIRECTLY_INCREASES if activates else pc.DIRECTLY_DECREASES
return pc.INCREASES if activates else pc.DECREASES
|
johnbachman/indra
|
indra/assemblers/pybel/assembler.py
|
Python
|
bsd-2-clause
| 25,382
|
[
"Pybel"
] |
776ebe1ab20b43b3d48e31ddf75df7b844a0429ce2a0925eebe0c90dec7f0d4e
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
# D2h C2h C2v D2 Cs Ci C2 C1
# E E E E E E E E
# C2x C2x
# C2y C2y
# C2z C2 C2 C2z C2
# i i i
# sx sx
# sy sy
# sz sh sh
POINTGROUP = ('D2h', 'C2h', 'C2v', 'D2' , 'Cs' , 'Ci' , 'C2' , 'C1' ,)
OPERATOR_TABLE = {
'D2h': ('E', 'C2x', 'C2y', 'C2z', 'i', 'sx' , 'sy' , 'sz' ),
'C2h': ('E', 'C2z', 'i', 'sz' ),
'C2v': ('E', 'C2z', 'sx' , 'sy' , ),
'D2' : ('E', 'C2x', 'C2y', 'C2z', ),
'Cs' : ('E', 'sz' ),
'Ci' : ('E', 'i', ),
'C2' : ('E', 'C2z', ),
'C1' : ('E', ),
}
#
IRREP_ID_TABLE = { # bin for XOR
'D2h': {'Ag' : 0, # 000
'B1g': 1, # 001
'B2g': 2, # 010
'B3g': 3, # 011
'Au' : 4, # 100
'B1u': 5, # 101
'B2u': 6, # 110
'B3u': 7,}, # 111
'C2h': {'Ag': 0, # 00
'Bg': 1, # 01
'Au': 2, # 10
'Bu': 3,}, # 11
'C2v': {'A1': 0, # 00
'A2': 1, # 01
'B1': 2, # 10
'B2': 3,}, # 11
'D2' : {'A' : 0, # 00
'B1': 1, # 01
'B2': 2, # 10
'B3': 3,}, # 11
'Cs' : {'A\'': 0, # 0
'A\"': 1,}, # 1
'Ci' : {'Ag': 0, # 0
'Au': 1,}, # 1
'C2' : {'A': 0, # 0
'B': 1,}, # 1
'C1' : {'A': 0,}, # 0
}
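# The binary irrep IDs above make direct products a bitwise XOR of the IDs;
# e.g. in D2h: B1g x B2u -> 1 ^ 6 == 7, which is B3u.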
IRREP_ID_MOLPRO = {'D2h': (1, # Ag
4, # B1g
6, # B2g
7, # B3g
8, # Au
5, # B1u
3, # B2u
2), # B3u
'C2v': (1, # A1
4, # A2
2, # B1
3), # B2
'C2h': (1, # Ag
4, # Bg
2, # Au
3), # Bu
'D2' : (1, # A
4, # B1
3, # B2
2), # B3
'Cs' : (1, # A'
2), # A"
'C2' : (1, # A
2), # B
'Ci' : (1, # Ag
2), # Au
'C1' : (1,)}
# E,C2x,C2y,C2z,i, sx,sy,sz
CHARACTER_TABLE = { # XOR
'D2h': (('Ag' , 1, 1, 1, 1, 1, 1, 1, 1), # 000
('B1g', 1,-1, -1, 1, 1,-1,-1, 1), # 001
('B2g', 1,-1, 1, -1, 1,-1, 1,-1), # 010
('B3g', 1, 1, -1, -1, 1, 1,-1,-1), # 011
('Au' , 1, 1, 1, 1, -1,-1,-1,-1), # 100
('B1u', 1,-1, -1, 1, -1, 1, 1,-1), # 101
('B2u', 1,-1, 1, -1, -1, 1,-1, 1), # 110
('B3u', 1, 1, -1, -1, -1,-1, 1, 1)), # 111
# E,C2,i, sh # XOR
'C2h': (('Ag', 1, 1, 1, 1), # 00
('Bg', 1,-1, 1,-1), # 01
('Au', 1, 1,-1,-1), # 10
('Bu', 1,-1,-1, 1)), # 11
# E,C2,sx,sy # XOR
'C2v': (('A1', 1, 1, 1, 1), # 00
('A2', 1, 1,-1,-1), # 01
('B1', 1,-1,-1, 1), # 10
('B2', 1,-1, 1,-1)), # 11
# E,C2x,C2y,C2z # XOR
'D2' : (('A' , 1, 1, 1, 1), # 00
('B1', 1,-1, -1, 1), # 01
('B2', 1,-1, 1, -1), # 10
('B3', 1, 1, -1, -1)), # 11
# E, sh # XOR
'Cs' : (('A\'',1, 1,), # 0
('A\"',1,-1,)), # 1
# E, i # XOR
'Ci' : (('Ag', 1, 1,), # 0
('Au', 1,-1,)), # 1
# E, C2 # XOR
'C2' : (('A', 1, 1,), # 0
('B', 1,-1,)), # 1
# E # XOR
'C1' : (('A', 1),), # 0
}
# D2h C2h C2v D2 Cs Ci C2 C1
SYMM_DESCENT_Z = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Ag', 'A2', 'B1', 'A\'', 'Ag', 'A', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Bg', 'B2', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\'', 'Au', 'A', 'A'),
('B1u', 'Au', 'A1', 'B1', 'A\'', 'Au', 'A', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\"', 'Au', 'B', 'A'),
('B3u', 'Bu', 'B1', 'B3', 'A\"', 'Au', 'B', 'A'),
)
SYMM_DESCENT_X = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Ag', 'A2', 'B3', 'A\'', 'Ag', 'A', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\'', 'Au', 'B', 'A'),
('B3u', 'Au', 'A1', 'B3', 'A\"', 'Au', 'A', 'A'),
)
SYMM_DESCENT_Y = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Ag', 'A2', 'B2', 'A\'', 'Ag', 'A', 'A'),
('B3g', 'Bg', 'B1', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Au', 'A1', 'B2', 'A\"', 'Au', 'A', 'A'),
('B3u', 'Bu', 'B2', 'B3', 'A\'', 'Au', 'B', 'A'),
)
SPHERIC_GTO_PARITY_ODD = (
# s
((0, 0, 0),),
# px, py, pz
((1, 0, 0),(0, 1, 0),(0, 0, 1)),
# dxy, dyz, dz2, dxz, dx2y2
((1, 1, 0),(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# fyx2, fxyz, fyz2, fz3, fxz2, fzx2, fx3
((0, 1, 0),(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),
(0, 0, 1),(1, 0, 0),),
# g
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(0, 0, 0),
(1, 0, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# h
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),),
# i
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(1, 1, 0),
(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),(1, 0, 1),
(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# j
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0))
)
SUBGROUP = {
'Dooh':('Coov', 'D2h', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'Coov':('C2v', 'C2', 'C1'),
'D2h': ('D2h', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'C2v': ('C2v', 'C2' , 'Cs' , 'C1'),
'C2h': ('C2h', 'C2' , 'Cs' , 'C1'),
'D2' : ('D2' , 'C2' , 'Ci' , 'C1'),
'Cs' : ('Cs' , 'C1'),
'Ci' : ('Ci' , 'C1'),
'C2' : ('C2' , 'C1'),
'C1' : ('C1',),
}
D2H_OPS = {'E' : numpy.eye(3),
'C2z': numpy.diag((-1.,-1., 1.)),
'C2x': numpy.diag(( 1.,-1.,-1.)),
'C2y': numpy.diag((-1., 1.,-1.)),
'i' : numpy.diag((-1.,-1.,-1.)),
'sz' : numpy.diag(( 1., 1.,-1.)),
'sx' : numpy.diag((-1., 1., 1.)),
'sy' : numpy.diag(( 1.,-1., 1.)),}
|
gkc1000/pyscf
|
pyscf/symm/param.py
|
Python
|
apache-2.0
| 8,377
|
[
"PySCF"
] |
ba6928e70e4275fb37a815fc58d2d3707460798a84935d9cfce26d5c6280875b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Loreto Parisi <loretoparisi@gmail.com>
# Copyright (C) 2016 Silvio Olivastri <silvio.olivastri@gmail.com>
# Copyright (C) 2016 Radim Rehurek <radim@rare-technologies.com>
"""
USAGE: $ python -m gensim.scripts.word2vec2tensor --input <Word2Vec model file> --output <TSV tensor filename prefix> [--binary] <Word2Vec binary flag>
Where:
<Word2Vec model file>: Input Word2Vec model
<TSV tensor filename prefix>: 2D tensor TSV output file name prefix
<Word2Vec binary flag>: Set True if Word2Vec model is binary. Defaults to False.
Output:
The script will create two TSV files: a 2D tensor format file and a Word Embedding metadata file. Both files will
use the --output file name as prefix.
This script is used to convert the word2vec format to Tensorflow 2D tensor and metadata formats for Embedding Visualization
To use the generated TSV 2D tensor and metadata file in the Projector Visualizer, please
1) Open http://projector.tensorflow.org/.
2) Choose "Load Data" from the left menu.
3) Select "Choose file" in "Load a TSV file of vectors." and choose you local "_tensor.tsv" file
4) Select "Choose file" in "Load a TSV file of metadata." and choose you local "_metadata.tsv" file
For more information about TensorBoard TSV format please visit:
https://www.tensorflow.org/versions/master/how_tos/embedding_viz/
"""
import os
import sys
import random
import logging
import argparse
import gensim
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
    '''
    Convert a Word2Vec model to a 2D tensor TSV file and a metadata file.

    Args:
        param1 (str): word2vec model file path
        param2 (str): filename prefix for the output files
        param3 (bool): set True to load a binary Word2Vec model, defaults to False
    '''
model = gensim.models.Word2Vec.load_word2vec_format(word2vec_model_path, binary=binary)
outfiletsv = tensor_filename + '_tensor.tsv'
outfiletsvmeta = tensor_filename + '_metadata.tsv'
with open(outfiletsv, 'w+') as file_vector:
with open(outfiletsvmeta, 'w+') as file_metadata:
for word in model.index2word:
file_metadata.write(word.encode('utf-8') + '\n')
vector_row = '\t'.join(map(str, model[word]))
file_vector.write(vector_row + '\n')
logger.info("2D tensor file saved to %s" % outfiletsv)
logger.info("Tensor metadata file saved to %s" % outfiletsvmeta)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", required=True,
help="Input word2vec model")
parser.add_argument(
"-o", "--output", required=True,
help="Output tensor file name prefix")
parser.add_argument( "-b", "--binary",
required=False,
help="If word2vec model in binary format, set True, else False")
args = parser.parse_args()
word2vec2tensor(args.input, args.output, args.binary)
logger.info("finished running %s", program)
|
akutuzov/gensim
|
gensim/scripts/word2vec2tensor.py
|
Python
|
lgpl-2.1
| 3,507
|
[
"VisIt"
] |
4aecc95aec4771c1b9020a340ef957e3012db7b0958d38405d7f08676ef80bea
|
from ase.io.png import PNG
from ase.data.colors import jmol_colors
from ase.data import covalent_radii
from ase.utils import rotate
from math import sqrt

import numpy as np
class MyPNG(PNG):
def __init__(self, atoms,
rotation='',
show_unit_cell=False,
radii=None,
bbox=None,
colors=None,
model=None,
scale=20) :
self.numbers = atoms.get_atomic_numbers()
self.colors = colors
self.model = model
if colors is None:
self.colors = jmol_colors[self.numbers]
if radii is None:
radii = covalent_radii[self.numbers]
elif type(radii) is float:
radii = covalent_radii[self.numbers] * radii
else:
radii = np.array(radii)
natoms = len(atoms)
if isinstance(rotation, str):
rotation = rotate(rotation)
A = atoms.get_cell()
if show_unit_cell > 0:
L, T, D = self.cell_to_lines(A)
C = np.empty((2, 2, 2, 3))
for c1 in range(2):
for c2 in range(2):
for c3 in range(2):
C[c1, c2, c3] = np.dot([c1, c2, c3], A)
C.shape = (8, 3)
C = np.dot(C, rotation) # Unit cell vertices
else:
L = np.empty((0, 3))
T = None
D = None
C = None
nlines = len(L)
X = np.empty((natoms + nlines, 3))
R = atoms.get_positions()
X[:natoms] = R
X[natoms:] = L
r2 = radii**2
for n in range(nlines):
d = D[T[n]]
if ((((R - L[n] - d)**2).sum(1) < r2) &
(((R - L[n] + d)**2).sum(1) < r2)).any():
T[n] = -1
X = np.dot(X, rotation)
R = X[:natoms]
if bbox is None:
X1 = (R - radii[:, None]).min(0)
X2 = (R + radii[:, None]).max(0)
if show_unit_cell == 2:
X1 = np.minimum(X1, C.min(0))
X2 = np.maximum(X2, C.max(0))
M = (X1 + X2) / 2
S = 1.05 * (X2 - X1)
w = scale * S[0]
#if w > 500:
#w = 500
#scale = w / S[0]
h = scale * S[1]
offset = np.array([scale * M[0] - w / 2, scale * M[1] - h / 2, 0])
else:
w = (bbox[2] - bbox[0]) * scale
h = (bbox[3] - bbox[1]) * scale
offset = np.array([bbox[0], bbox[1], 0]) * scale
self.w = w
self.h = h
X *= scale
X -= offset
if nlines > 0:
D = np.dot(D, rotation)[:, :2] * scale
if C is not None:
C *= scale
C -= offset
A = np.dot(A, rotation)
A *= scale
self.A = A
self.X = X
self.D = D
self.T = T
self.C = C
self.natoms = natoms
self.d = 2 * scale * radii
def write(self, filename, resolution=72):
self.filename = filename
self.write_header(resolution=resolution)
self.write_info()
self.write_body()
self.write_trailer(resolution=resolution)
def write_info(self):
def latex_float(f):
float_str = "{0:.2e}".format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"{0} \times 10^{{{1}}}".format(base, int(exponent))
else:
return float_str
import matplotlib.text
if self.model is not None:
time = latex_float(self.model.base.get_kmc_time())
text = matplotlib.text.Text(.05*self.w,
.9*self.h,
r'$t = {time}\,{{\rm s}}$'.format(**locals()),
fontsize=36,
bbox={'facecolor':'white', 'alpha':0.5, 'ec':'white', 'pad':1, 'lw':0 },
)
text.figure = self.figure
text.draw(self.renderer)
def write_header(self, resolution=72):
from matplotlib.backends.backend_agg import RendererAgg, Figure
from matplotlib.backend_bases import GraphicsContextBase
try:
from matplotlib.transforms import Value
except ImportError:
dpi = resolution
else:
dpi = Value(resolution)
self.renderer = RendererAgg(self.w, self.h, dpi)
self.figure = Figure()
self.gc = GraphicsContextBase()
self.gc.set_linewidth(.2)
def write_trailer(self, resolution=72):
renderer = self.renderer
if hasattr(renderer._renderer, 'write_png'):
# Old version of matplotlib:
renderer._renderer.write_png(self.filename)
else:
from matplotlib import _png
# buffer_rgba does not accept arguments from version 1.2.0
# https://github.com/matplotlib/matplotlib/commit/f4fee350f9fbc639853bee76472d8089a10b40bd
import matplotlib
        if matplotlib.__version__ < '1.2.0':
            _png.write_png(renderer._renderer.buffer_rgba(0, 0),
                           renderer.width, renderer.height,
                           self.filename, resolution)
        else:
            _png.write_png(renderer._renderer.buffer_rgba(),
                           renderer.width, renderer.height,
                           self.filename, resolution)
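# Usage sketch (illustrative; not part of the original file). Assumes ASE is
# installed, along with a matplotlib old enough to match the backend internals
# used above; the structure and file name are arbitrary examples.
#     from ase.build import bulk
#     atoms = bulk('Cu', 'fcc', a=3.6, cubic=True)
#     MyPNG(atoms, rotation='10x,20y', scale=20).write('cu.png', resolution=150)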
|
mieand/kmos
|
kmos/run/png.py
|
Python
|
gpl-3.0
| 5,702
|
[
"ASE"
] |
9dd63edf92d12ee45bed49c54b93e1cb8ef55b6b84425c83892ff176a74bde2f
|
#!/usr/bin/env python
"""
the Easy Vision Environment
EVE provides easy-to-use functionality for performing common image
processing and computer vision tasks. These routines are intended to be
used during interactive sessions, from the Python interpreter's command
prompt or from an enhanced interpreter such as IPython, as well as in
scripts.
EVE is built principally on top of the popular numpy ('numerical
python') extension to Python. Images are represented as numpy arrays,
usually 32-bit floating-point ones, indexed by line, pixel and channel,
in that order: image[line,pixel,channel]. The choice of a
floating-point representation is deliberate: it permits images that have
been captured from sensors with more than 8 bits dynamic range to be
processed (e.g., astronomical images and digital radiographs); it
supports Fourier-space processing; and it avoids having to worry about
rounding values except at output. Images in EVE may also contain any
number of channels, so EVE can be used with e.g. remote sensing or
hyperspectral imagery.
Other Python extensions are loaded by those routines that need them. In
particular, PIL (the 'Python Imaging Extension') is used for the input
and output of common image file formats, though not for any processing.
scipy ('scientific python') is used by several routines, and so are a
few other extensions here and there.
On the other hand, EVE is slow. If you're thinking of using EVE instead
of openCV for real-time video processing, forget it! This is partly
because of the interpreted nature of Python and partly because EVE
attempts to provide algorithms that are understandable rather than fast:
it is intended as a prototyping environment rather than a real-time
delivery one. (This also makes it useful for teaching how vision
algorithms work, of course.) In the fullness of time, it is intended to
hook either OpenCV or dedicated C code backends for common functions
that could usefully be speeded up, and also to investigate the use of
GPUs -- but not yet.
EVE was written by Adrian F. Clark <alien@essex.ac.uk>, though several
routines are adapted from code written by others; such code is
attributed in the relevant routines. EVE is made available entirely
freely: you are at liberty to use it in your own work, either as is or
after modification. The author would be very happy to hear of
improvements or enhancements that you may make.
"""
from __future__ import division
import math, numpy, os, platform, re, string, struct, sys, tempfile
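# Illustrative example (not part of the original module): an EVE image is a
# plain numpy array indexed [line, pixel, channel], usually 32-bit float.
#     im = numpy.zeros ((480, 640, 3), dtype=numpy.float32) # 480x640 RGB
#     im[100, 200, 0] = 255.0 # set the red channel of one pixel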
#-------------------------------------------------------------------------------
# Symbolic constants.
# The operating system we are running under, used to select the appropriate
# external program for display or grabbing images and a few other things.
systype = platform.system ()
tiny = 1.0e-7 # the smallest number worth bothering about
max_image_value = 255.0 # the largest value normally put into an image
character_height = 13 # height of characters in draw_text()
character_width = 10 # width of characters in draw_text()
character_bitmap = {
' ': [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
'!': [0x00,0x00,0x18,0x18,0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18],
'"': [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x36,0x36,0x36,0x36],
'#': [0x00,0x00,0x00,0x66,0x66,0xff,0x66,0x66,0xff,0x66,0x66,0x00,0x00],
'$': [0x00,0x00,0x18,0x7e,0xff,0x1b,0x1f,0x7e,0xf8,0xd8,0xff,0x7e,0x18],
'%': [0x00,0x00,0x0e,0x1b,0xdb,0x6e,0x30,0x18,0x0c,0x76,0xdb,0xd8,0x70],
'&': [0x00,0x00,0x7f,0xc6,0xcf,0xd8,0x70,0x70,0xd8,0xcc,0xcc,0x6c,0x38],
"'": [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x1c,0x0c,0x0e],
'(': [0x00,0x00,0x0c,0x18,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x18,0x0c],
')': [0x00,0x00,0x30,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x30],
'*': [0x00,0x00,0x00,0x00,0x99,0x5a,0x3c,0xff,0x3c,0x5a,0x99,0x00,0x00],
'+': [0x00,0x00,0x00,0x18,0x18,0x18,0xff,0xff,0x18,0x18,0x18,0x00,0x00],
',': [0x00,0x00,0x30,0x18,0x1c,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
'-': [0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x00,0x00,0x00,0x00,0x00],
'.': [0x00,0x00,0x00,0x38,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
'/': [0x00,0x60,0x60,0x30,0x30,0x18,0x18,0x0c,0x0c,0x06,0x06,0x03,0x03],
'0': [0x00,0x00,0x3c,0x66,0xc3,0xe3,0xf3,0xdb,0xcf,0xc7,0xc3,0x66,0x3c],
'1': [0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x78,0x38,0x18],
'2': [0x00,0x00,0xff,0xc0,0xc0,0x60,0x30,0x18,0x0c,0x06,0x03,0xe7,0x7e],
'3': [0x00,0x00,0x7e,0xe7,0x03,0x03,0x07,0x7e,0x07,0x03,0x03,0xe7,0x7e],
'4': [0x00,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0xff,0xcc,0x6c,0x3c,0x1c,0x0c],
'5': [0x00,0x00,0x7e,0xe7,0x03,0x03,0x07,0xfe,0xc0,0xc0,0xc0,0xc0,0xff],
'6': [0x00,0x00,0x7e,0xe7,0xc3,0xc3,0xc7,0xfe,0xc0,0xc0,0xc0,0xe7,0x7e],
'7': [0x00,0x00,0x30,0x30,0x30,0x30,0x18,0x0c,0x06,0x03,0x03,0x03,0xff],
'8': [0x00,0x00,0x7e,0xe7,0xc3,0xc3,0xe7,0x7e,0xe7,0xc3,0xc3,0xe7,0x7e],
'9': [0x00,0x00,0x7e,0xe7,0x03,0x03,0x03,0x7f,0xe7,0xc3,0xc3,0xe7,0x7e],
':': [0x00,0x00,0x00,0x38,0x38,0x00,0x00,0x38,0x38,0x00,0x00,0x00,0x00],
';': [0x00,0x00,0x30,0x18,0x1c,0x1c,0x00,0x00,0x1c,0x1c,0x00,0x00,0x00],
'<': [0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0xc0,0x60,0x30,0x18,0x0c,0x06],
'=': [0x00,0x00,0x00,0x00,0xff,0xff,0x00,0xff,0xff,0x00,0x00,0x00,0x00],
'>': [0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x03,0x06,0x0c,0x18,0x30,0x60],
'?': [0x00,0x00,0x18,0x00,0x00,0x18,0x18,0x0c,0x06,0x03,0xc3,0xc3,0x7e],
'@': [0x00,0x00,0x3f,0x60,0xcf,0xdb,0xd3,0xdd,0xc3,0x7e,0x00,0x00,0x00],
'A': [0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xff,0xc3,0xc3,0xc3,0x66,0x3c,0x18],
'B': [0x00,0x00,0xfe,0xc7,0xc3,0xc3,0xc7,0xfe,0xc7,0xc3,0xc3,0xc7,0xfe],
'C': [0x00,0x00,0x7e,0xe7,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xe7,0x7e],
'D': [0x00,0x00,0xfc,0xce,0xc7,0xc3,0xc3,0xc3,0xc3,0xc3,0xc7,0xce,0xfc],
'E': [0x00,0x00,0xff,0xc0,0xc0,0xc0,0xc0,0xfc,0xc0,0xc0,0xc0,0xc0,0xff],
'F': [0x00,0x00,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xfc,0xc0,0xc0,0xc0,0xff],
'G': [0x00,0x00,0x7e,0xe7,0xc3,0xc3,0xcf,0xc0,0xc0,0xc0,0xc0,0xe7,0x7e],
'H': [0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xff,0xc3,0xc3,0xc3,0xc3,0xc3],
'I': [0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x7e],
'J': [0x00,0x00,0x7c,0xee,0xc6,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06],
'K': [0x00,0x00,0xc3,0xc6,0xcc,0xd8,0xf0,0xe0,0xf0,0xd8,0xcc,0xc6,0xc3],
'L': [0x00,0x00,0xff,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0],
'M': [0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xdb,0xff,0xff,0xe7,0xc3],
'N': [0x00,0x00,0xc7,0xc7,0xcf,0xcf,0xdf,0xdb,0xfb,0xf3,0xf3,0xe3,0xe3],
'O': [0x00,0x00,0x7e,0xe7,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xe7,0x7e],
'P': [0x00,0x00,0xc0,0xc0,0xc0,0xc0,0xc0,0xfe,0xc7,0xc3,0xc3,0xc7,0xfe],
'Q': [0x00,0x00,0x3f,0x6e,0xdf,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3,0x66,0x3c],
'R': [0x00,0x00,0xc3,0xc6,0xcc,0xd8,0xf0,0xfe,0xc7,0xc3,0xc3,0xc7,0xfe],
'S': [0x00,0x00,0x7e,0xe7,0x03,0x03,0x07,0x7e,0xe0,0xc0,0xc0,0xe7,0x7e],
'T': [0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff],
'U': [0x00,0x00,0x7e,0xe7,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3],
'V': [0x00,0x00,0x18,0x3c,0x3c,0x66,0x66,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3],
'W': [0x00,0x00,0xc3,0xe7,0xff,0xff,0xdb,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3],
'X': [0x00,0x00,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x3c,0x3c,0x66,0x66,0xc3],
'Y': [0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x3c,0x66,0x66,0xc3],
'Z': [0x00,0x00,0xff,0xc0,0xc0,0x60,0x30,0x7e,0x0c,0x06,0x03,0x03,0xff],
'[': [0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c],
'\\': [0x00,0x03,0x03,0x06,0x06,0x0c,0x0c,0x18,0x18,0x30,0x30,0x60,0x60],
']': [0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c],
'^': [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc3,0x66,0x3c,0x18],
'_': [0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
'`': [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x38,0x30,0x70],
'a': [0x00,0x00,0x7f,0xc3,0xc3,0x7f,0x03,0xc3,0x7e,0x00,0x00,0x00,0x00],
'b': [0x00,0x00,0xfe,0xc3,0xc3,0xc3,0xc3,0xfe,0xc0,0xc0,0xc0,0xc0,0xc0],
'c': [0x00,0x00,0x7e,0xc3,0xc0,0xc0,0xc0,0xc3,0x7e,0x00,0x00,0x00,0x00],
'd': [0x00,0x00,0x7f,0xc3,0xc3,0xc3,0xc3,0x7f,0x03,0x03,0x03,0x03,0x03],
'e': [0x00,0x00,0x7f,0xc0,0xc0,0xfe,0xc3,0xc3,0x7e,0x00,0x00,0x00,0x00],
'f': [0x00,0x00,0x30,0x30,0x30,0x30,0x30,0xfc,0x30,0x30,0x30,0x33,0x1e],
'g': [0x7e,0xc3,0x03,0x03,0x7f,0xc3,0xc3,0xc3,0x7e,0x00,0x00,0x00,0x00],
'h': [0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xfe,0xc0,0xc0,0xc0,0xc0],
'i': [0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x18,0x00],
'j': [0x38,0x6c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x00,0x00,0x0c,0x00],
'k': [0x00,0x00,0xc6,0xcc,0xf8,0xf0,0xd8,0xcc,0xc6,0xc0,0xc0,0xc0,0xc0],
'l': [0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x78],
'm': [0x00,0x00,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xfe,0x00,0x00,0x00,0x00],
'n': [0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xfc,0x00,0x00,0x00,0x00],
'o': [0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00],
'p': [0xc0,0xc0,0xc0,0xfe,0xc3,0xc3,0xc3,0xc3,0xfe,0x00,0x00,0x00,0x00],
'q': [0x03,0x03,0x03,0x7f,0xc3,0xc3,0xc3,0xc3,0x7f,0x00,0x00,0x00,0x00],
'r': [0x00,0x00,0xc0,0xc0,0xc0,0xc0,0xc0,0xe0,0xfe,0x00,0x00,0x00,0x00],
's': [0x00,0x00,0xfe,0x03,0x03,0x7e,0xc0,0xc0,0x7f,0x00,0x00,0x00,0x00],
't': [0x00,0x00,0x1c,0x36,0x30,0x30,0x30,0x30,0xfc,0x30,0x30,0x30,0x00],
'u': [0x00,0x00,0x7e,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00],
'v': [0x00,0x00,0x18,0x3c,0x3c,0x66,0x66,0xc3,0xc3,0x00,0x00,0x00,0x00],
'w': [0x00,0x00,0xc3,0xe7,0xff,0xdb,0xc3,0xc3,0xc3,0x00,0x00,0x00,0x00],
'x': [0x00,0x00,0xc3,0x66,0x3c,0x18,0x3c,0x66,0xc3,0x00,0x00,0x00,0x00],
'y': [0xc0,0x60,0x60,0x30,0x18,0x3c,0x66,0x66,0xc3,0x00,0x00,0x00,0x00],
'z': [0x00,0x00,0xff,0x60,0x30,0x18,0x0c,0x06,0xff,0x00,0x00,0x00,0x00],
'{': [0x00,0x00,0x0f,0x18,0x18,0x18,0x38,0xf0,0x38,0x18,0x18,0x18,0x0f],
'|': [0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18],
'}': [0x00,0x00,0xf0,0x18,0x18,0x18,0x1c,0x0f,0x1c,0x18,0x18,0x18,0xf0],
'~': [0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x8f,0xf1,0x60,0x00,0x00,0x00]
}
#-------------------------------------------------------------------------------
def add_gaussian_noise (im, mean=0.0, sd=1.0, seed=None):
"""
Add Gaussian-distributed noise to each pixel of an image.
Arguments:
im the image to which noise will be added
mean the mean of the Gaussian-distributed noise (default: 0.0)
sd the standard deviation of the noise (default: 1.0)
seed if supplied, this is used to seed the random number generator
"""
if not seed is None: numpy.random.seed (seed)
im += numpy.random.normal (mean, sd, im.shape)
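# Usage sketch (illustrative): supplying the same seed makes the noise
# reproducible from run to run.
#     add_gaussian_noise (im, mean=0.0, sd=5.0, seed=42)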
#-------------------------------------------------------------------------------
def annular_mean (im, y0=None, x0=None, rlo=0.0, rhi=None, alo=-math.pi,
ahi=math.pi):
"""
Return the mean of an annular region of an image.
Arguments:
im the image to be examined
y0 the y-value of the centre of the rotation (default: centre pixel)
x0 the x-value of the centre of the rotation (default: centre pixel)
rlo the inner radius of the annular region
rhi the outer radius of the annular region
alo the lower angle of the annular region (default: -pi)
ahi the higher angle of the annular region (default: pi)
"""
# Fill in the default values as necessary.
ny, nx, nc = sizes (im)
if y0 is None: y0 = ny / 2.0
if x0 is None: x0 = nx / 2.0
if rhi is None: rhi = math.sqrt ((nx - x0)**2 + (ny - y0)**2)
ave = num = 0.0
# Cycle through the image.
for y in xrange (0, ny):
yy = (y - y0)**2
for x in xrange (0, nx):
r = math.sqrt (yy + (x-x0)**2)
if r <= 0.0: angle = 0.0
else: angle = -math.atan2 (y-y0, x-x0)
for c in xrange (0, nc):
if angle >= alo and angle <= ahi and r >= rlo and r <= rhi:
ave += im[y,x,c]
num += 1
if num > 0: ave /= num
return ave
#-------------------------------------------------------------------------------
def annular_set (im, v, y0=None, x0=None, rlo=0.0, rhi=None, alo=-math.pi,
ahi=math.pi):
"""
Set an annular region of an image.
Arguments:
im the image to be set (modified)
v value to which the region is to be set
y0 the y-value of the centre of the rotation (default: centre pixel)
x0 the x-value of the centre of the rotation (default: centre pixel)
rlo the inner radius of the annular region
rhi the outer radius of the annular region
alo the lower angle of the annular region (default: -pi)
ahi the higher angle of the annular region (default: pi)
"""
# Fill in the default values as necessary.
ny, nx, nc = sizes (im)
if y0 is None: y0 = ny / 2.0
if x0 is None: x0 = nx / 2.0
if rhi is None: rhi = math.sqrt ((nx - x0)**2 + (ny - y0)**2)
# Cycle through the image.
for y in xrange (0, ny):
yy = (y - y0)**2
for x in xrange (0, nx):
r = math.sqrt (yy + (x-x0)**2)
if r <= 0.0: angle = 0.0
else: angle = -math.atan2 (y-y0, x-x0)
if angle >= alo and angle <= ahi and r >= rlo and r <= rhi:
im[y,x] = v
#-------------------------------------------------------------------------------
def ascii_art (im, using=["* ", "@#+- ", "#XXXX/' "], fd=sys.stdout,
ff=False, aspect_ratio=1.95, width=132, border="tblr",
reverse=False, limits=None):
"""
Output an image as characters, optionally with overprinting.
Arguments:
im image to be printed
using list of characters, each defining one layers of
overprinting in order of decreasing blackness
                    (default: three layers ["* ", "@#+- ", "#XXXX/' "])
fd file on which the output is written (default: sys.stdout)
ff if True, output a form-feed before printing the image
(default: false)
aspect_ratio ratio of character height to width (default: 1.95)
width number of characters in each line of output (default: 132)
border how the image should have its border drawn, a string of
characters from 'news' or 'tblr'
reverse if True, reverse the contrast (default: False)
      limits        if supplied, a list comprising the minimum and maximum
                    image values that should be used for determining the
                    mapping onto characters; image values outside these
                    limits are clipped
"""
# If using is a string, we don't overprint. A good set of characters in
# that case is using="#@X+/' " for terminal windows with a light
# background. If using is a list, we assume it's a list of strings,
# giving the different levels of overprinting. In that case, the default
# set looks OK.
chars = []
if isinstance (using, str):
nover = 1
chars.append(using)
elif isinstance (using, list):
chars = using
nover = len(using)
else:
raise ValueError, 'Illegal argument type'
nvals = len (chars[0])
# Work out how many pixels we're to print across the output.
(ny, nx, nc) = sizes (im)
if nx < width: xmax = nx
else: xmax = width - 2
xinc = nx / xmax
yinc = xinc * aspect_ratio
ymax = int (ny / yinc + 0.5)
# Work out the grey-level scaling factor.
if limits is None: lo, hi = extrema (im)
else: lo, hi = limits
if hi == lo: hi += 1
fac = (nvals - 1) / (hi - lo)
    # Decide which borders we're to print. If we're to print the top or
    # bottom border, work it out.
doN = doE = doS = doW = False
if string.find (border, 'n') >= 0 or string.find (border, 't') >= 0:
doN = True
if string.find (border, 'e') >= 0 or string.find (border, 'r') >= 0:
doE = True
if string.find (border, 's') >= 0 or string.find (border, 'b') >= 0:
doS = True
if string.find (border, 'w') >= 0 or string.find (border, 'l') >= 0:
doW = True
if doN or doS:
sep = '+'
for x in xrange (0, xmax):
if x % 5 == 4: sep += '+'
else: sep += '-'
sep += '+'
# Arrange to print a form-feed, if necessary, and print the top border.
    if ff: ffc = '\f'
    else: ffc = ''
if doN:
print >>fd, ffc + sep
ffc = ''
# Print the image.
buf = numpy.zeros (xmax, int)
fy = 0
for y in xrange (0, ymax):
dy = fy - int(fy)
dy1 = 1.0 - dy
ylo = int(fy) % ny
yhi = (ylo + 1) % ny
ib = -1
# For each pixel along a line, average all the channels to one and
# scale the value into the appropriate number of levels.
fx = 0
for x in xrange (0, xmax):
dx = fx - int(fx)
dx1 = 1.0 - dx
xlo = int(fx) % nx
xhi = (xlo + 1) % nx
v = 0
for c in xrange (0, nc):
vc = dx1 * dy1 * im[ylo,xlo,c] + \
dx * dy1 * im[ylo,xhi,c] + \
dx1 * dy * im[yhi,xlo,c] + \
dx * dy * im[yhi,xhi,c]
v += vc
v /= nc
v = int ((v - lo) * fac + 0.5)
if v < 0: v = 0
if v >= nvals: v = nvals - 1
ib += 1
buf[ib] = v
fx += xinc
fy += yinc
# Print the line, including the borders if appropriate. The traditional
# way of doing this is as a series of lines using the carriage return
# character '\r' so that subsequent lines over-print the first; but '\r'
# is the end-of-line delimiter on Macintoshes, which confuses things.
# So we have to backspace after each character in order to over-print.
# And so technology moves on...
if doW or doE:
if y % 5 == 4: mark = '+'
else: mark = '|'
else: mark = ' '
line = ' '
if doW: line = mark
for x in xrange (0, len(buf)):
for ov in xrange (0, nover-1):
line += chars[ov][buf[x]] + '\b'
line += chars[nover-1][buf[x]]
if doE: line += mark
print >>fd, ffc + line
ffc = ''
# Print the bottom border.
if doS: print >>fd, sep
#-------------------------------------------------------------------------------
def binarize (im, threshold, bg=0.0, fg=max_image_value):
"""
Binarize an image, returning the result.
Arguments:
im image to be binarized
threshold the threshold to be used for binarization
      bg          value to which pixels below the threshold will be set
(default: 0.0)
fg value to which pixels equal to or above the threshold will
be set (default: 255.0)
"""
bim = image (im)
set (bim, bg)
bim[numpy.where (im >= threshold)] = fg
return bim
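# Usage sketch (illustrative): binarize() pairs naturally with
# find_threshold_otsu(), defined later in this module, for automatic
# thresholding.
#     t = find_threshold_otsu (im)
#     bim = binarize (im, t)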
#-------------------------------------------------------------------------------
def blend_pixel (im, y, x, v, opac):
"""
Blend the value v into the pixel im[y,x] according to the opacity opac.
Arguments:
im image in which the pixel is drawn (modified)
y y-position of the pixel to be modified
x x-position of the pixel to be modified
v new value to which the pixel is to be set
opac opacity with which the value will be drawn into the pixel
"""
ny, nx, nc = sizes (im)
if y >= 0 and y < ny and x >= 0 and x < nx:
if not isinstance (v, list): v = [v] * nc
for c in xrange (0, nc):
im[y,x,c] = v[c] * opac + (1.0 - opac) * im[y,x,c]
#-------------------------------------------------------------------------------
def canny (im, lo, hi):
"""
Perform edge detection in im using the Canny operator.
Three EVE images are returned: the first contains the gradient
magnitudes at each pixel; the second contains the gradient
magnitudes after non-maximum supression and the third contains the
final edges after hysteresis thresholding.
Arguments:
im image in which the edges are to be found
lo threshold below which edge segments are discarded
hi threshold above which edge segments are definitely edges
The original implementation of this operator was by Zachary Pincus
<zachary.pincus@yale.edu>, adapted to work with EVE-format images.
"""
import scipy
import scipy.ndimage as ndimage
# Convert the EVE-format image into one compatible with scipy.
ny, nx, nc = sizes (im)
if nc == 1: sci_im = im[:,:,0]
else: sci_im = mono(im)[:,:,0]
# The following filter kernels are for calculating the value of
# neighbours in the required directions.
_N = scipy.array([[0, 1, 0],
[0, 0, 0],
[0, 1, 0]], dtype=bool)
_NE = scipy.array([[0, 0, 1],
[0, 0, 0],
[1, 0, 0]], dtype=bool)
_W = scipy.array([[0, 0, 0],
[1, 0, 1],
[0, 0, 0]], dtype=bool)
_NW = scipy.array([[1, 0, 0],
[0, 0, 0],
[0, 0, 1]], dtype=bool)
# After quantizing the orientations of gradients, vertical
# (north-south) edges get values of 3, northwest-southeast edges get
# values of 2, and so on, as below.
_NE_d = 0
_W_d = 1
_NW_d = 2
_N_d = 3
grad_x = ndimage.sobel(sci_im, 0)
grad_y = ndimage.sobel(sci_im, 1)
grad_mag = scipy.sqrt(grad_x**2+grad_y**2)
grad_angle = scipy.arctan2(grad_y, grad_x)
# Scale the angles in the range [0,3] and then round to quantize.
quantized_angle = scipy.around(3 * (grad_angle + numpy.pi) / (numpy.pi * 2))
# Perform non-maximal suppression. An edge pixel is only good if
# its magnitude is greater than its neighbours normal to the edge
# direction. We quantize the edge direction into four angles, so we
# only need to look at four sets of neighbours.
NE = ndimage.maximum_filter(grad_mag, footprint=_NE)
W = ndimage.maximum_filter(grad_mag, footprint=_W)
NW = ndimage.maximum_filter(grad_mag, footprint=_NW)
N = ndimage.maximum_filter(grad_mag, footprint=_N)
thinned = (((grad_mag > W) & (quantized_angle == _N_d )) |
((grad_mag > N) & (quantized_angle == _W_d )) |
((grad_mag > NW) & (quantized_angle == _NE_d)) |
((grad_mag > NE) & (quantized_angle == _NW_d)) )
thinned_grad = thinned * grad_mag
    # Perform hysteresis thresholding: find seeds above the high
# threshold, then expand out until the line segment goes below the
# low threshold.
high = thinned_grad > hi
low = thinned_grad > lo
canny_edges = ndimage.binary_dilation(high, structure=scipy.ones((3,3)),
iterations=-1, mask=low)
# Convert the results back to EVE-format images and return them.
gm = image ((ny,nx,1))
tm = image ((ny,nx,1))
ce = image ((ny,nx,1))
gm[:,:,0] = grad_mag[:,:]
tm[:,:,0] = thinned_grad[:,:]
ce[:,:,0] = canny_edges[:,:] * max_image_value
return gm, tm, ce
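# Usage sketch (illustrative; the thresholds are arbitrary examples):
#     grad, thinned, edges = canny (im, lo=20.0, hi=60.0)
# 'edges' holds the hysteresis-thresholded edge map, scaled to 0/255.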
#-------------------------------------------------------------------------------
def centroid (im, c=0):
"""
Return the centroid of a channel of an image.
This routine is normally used on a binarized image (see binarize())
after labelling (see label_regions() and labelled_region()) to
locate the centres of regions.
Arguments:
im image for which the centroid is to be found
c channel to be examined (default: 0)
"""
m00 = m01 = m10 = 0.0
ny, nx, nc = sizes (im)
for y in xrange (0, ny):
for x in xrange (0, nx):
m00 += im[y,x,c]
m10 += im[y,x,c] * y
m01 += im[y,x,c] * x
if m00 < tiny:
y = ny / 2.0
x = nx / 2.0
else:
y = m10 / m00
x = m01 / m00
return [y, x]
#-------------------------------------------------------------------------------
def clip (im, lo, hi):
"""
Ensure all pixels in an image are in the range lo to hi.
Arguments:
im the image to be clipped (modified)
lo the lowest value to be in the image after clipping
hi the highest value to be in the image after clipping
"""
numpy.clip (im, lo, hi, out=im)
#-------------------------------------------------------------------------------
def compare (im1, im2, tol=tiny, report=20, indent=' ', fd=sys.stdout):
"""
Compare two images, reporting up to report pixels that differ.
The routine returns the number of differences found.
Arguments:
im1 image to be compared with im2
im2 image to be compared with im1
tol minimum amount by which pixels must differ (default: eve.tiny)
report the maximum number of differences reported (default: 20)
(the presence of further differences is indicated by '...')
indent indentation output before a difference (default: ' ')
fd file on which the output is to be written (default: sys.stdout)
"""
ny, nx, nc = sizes (im1)
ndiffs = 0
diffs = []
for y in xrange (0, ny):
for x in xrange (0, nx):
for c in xrange (0, nc):
if abs (im1[y,x,c] - im2[y,x,c]) > tol:
ndiffs += 1
if ndiffs <= report:
diffs.append ([y,x,c])
if ndiffs > 0 and report > 0:
print >>fd, ndiffs, 'differences found:'
for d in xrange (0, len(diffs)):
y,x,c = diffs[d]
print >>fd, indent, y,x,c, '->', im1[y,x,c], '&', im2[y,x,c]
if ndiffs > report: print >>fd, indent, '...'
return ndiffs
#-------------------------------------------------------------------------------
def contrast_stretch (im, low=0.0, high=max_image_value):
"""
Stretch the contrast in the image to the supplied low and high values.
Arguments:
im image whose contrast is to be stretched (modified)
low new value to which the lowest value in im is to be scaled
(default: 0.0)
high new value to which the highest value in im is to be scaled
(default: 255.0)
"""
oldmin, oldmax = extrema (im)
fac = (high - low) / (oldmax - oldmin)
    # The following line would rebind the local name 'im' to a new array
    # rather than modifying the caller's image in place, so we use the three
    # in-place operations below instead.
    # im = (im - oldmin) * fac + low
im -= oldmin
im *= fac
im += low
#-------------------------------------------------------------------------------
def convolve (im, mask, statistic='sum'):
"""
Perform a convolution of im with mask, returning the result.
Arguments:
      im          the image to be convolved with mask (not modified; the
                  result is returned)
mask the convolution mask to be used
statistic one of:
sum conventional convolution
mean conventional convolution
median median filtering
min grey-scale shrink (reduces light areas)
max grey-scale expand (enlarges light areas)
"""
ny, nx, nc = sizes (im)
my, mx, mc = sizes (mask)
yo = my // 2
xo = mx // 2
# Create an output image of the same size as the input.
result = image (im)
# We need a special case for 'min' statistic to erase the mask elements
# that are zero.
nzeros = len ([x for x in mask.ravel() if x == 0])
# Loop over the pixels in the image. For each pixel position, multiply
# the region around it with the mask, summing the elements and storing
# that in the equivalent pixel of the output image.
v = numpy.zeros ((my*mx*mc))
vi = 0
for yi in xrange (0, ny):
for xi in xrange (0, nx):
for ym in xrange (0, my):
yy = (ym + yi - yo) % ny
for xm in xrange (0, mx):
xx = (xm + xi - xo) % nx
v[vi] = im[yy,xx,0] * mask[ym,xm,0]
vi += 1
if statistic == 'sum': ave = numpy.sum (v)
elif statistic == 'mean': ave = numpy.mean (v)
elif statistic == 'max': ave = numpy.max (v)
elif statistic == 'min': ave = numpy.min (v[nzeros:])
elif statistic == 'median': ave = numpy.median (v)
result[yi,xi,0] = ave
vi = 0
return result
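# Usage sketch (illustrative): smooth an image with a 3x3 mean filter, using
# image() and set(), which are defined elsewhere in this module.
#     mask = image ((3, 3, 1))
#     set (mask, 1.0)
#     smoothed = convolve (im, mask, statistic='mean')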
#-------------------------------------------------------------------------------
def copy (im):
"""
Copy the pixels from image 'im' into a new image, which is returned.
Arguments:
im the image to be copied
"""
return im.copy ()
#-------------------------------------------------------------------------------
def correlate (im1, im2):
"""
Return the unnormalized Fourier correlation surface between two images.
Arguments:
im1 image to be correlated with im2
im2 image to be correlated with im1
"""
# Calculate the normalization factor (which doesn't appear to be right).
v1 = sd (im1)**2
v2 = sd (im2)**2
fac = math.sqrt (v1 * v2)
# Transform, invert one, multiply, invert, shift peaks to the right
# place, normalize, and return the result.
temp1 = numpy.fft.fft2 (im1, axes=(-3,-2))
temp2 = numpy.fft.fft2 (im2, axes=(-3,-2))
reflect_horizontally (temp2)
reflect_vertically (temp2)
temp2 *= temp1
temp1 = numpy.fft.ifft2 (temp2, axes=(-3,-2))
temp1 = numpy.fft.fftshift (temp1, axes=(-3,-2))
temp1 /= fac
return temp1
#-------------------------------------------------------------------------------
def correlation_coefficient (im1, im2):
"""
Return the correlation coefficient between two images.
Arguments:
im1 image to be correlated with im2
im2 image to be correlated with im1
"""
ny, nx, nc = sizes (im1)
sumx = sumy = sumxx = sumyy = sumxy = 0.0
for y in xrange (0, ny):
for x in xrange (0, nx):
for c in xrange (0, nc):
v1 = im1[y,x,c]
v2 = im2[y,x,c]
sumx += v1
sumy += v2
sumxx += v1 * v1
sumxy += v1 * v2
sumyy += v2 * v2
n = ny * nx * nc
v1 = sumxy - sumx * sumy / n
v2 = math.sqrt((sumxx-sumx*sumx/n) * (sumyy-sumy*sumy/n))
return v1 / v2
#-------------------------------------------------------------------------------
def covariance (im):
"""
Return the covariance matrix and means of the channels of im.
Arguments:
im image for which the covariance matrix is to be calculated
"""
ny, nx, nc = sizes (im)
covmat = numpy.ndarray ((nc, nc))
ave = numpy.ndarray ((nc))
for c in xrange (0, nc):
ch = get_channel (im, c)
ave[c] = mean (ch)
for c1 in xrange (0, nc):
ch1 = get_channel (im, c1)
for c2 in xrange (0, c1+1):
ch2 = get_channel (im, c2)
covmat[c1,c2] = ((ch1 - ave[c1]) * (ch2 - ave[c2])).mean()
covmat[c2,c1] = covmat[c1,c2]
return covmat, ave
#-------------------------------------------------------------------------------
def cumulative_histogram (im, bins=64, limits=None, disp=False):
"""
Find the cumulative histogram of an image.
Arguments:
im image for which the cumulative histogram is to be found
bins number of bins in the histogram (default: 64)
limits extrema between which the histogram is to be found
      disp     when True, the histogram will be drawn (default: False)
"""
a, h = histogram (im, bins=bins, limits=limits, disp=False)
h = h.cumsum()
if disp: graph (a, h, 'Cumulative histogram', 'bin', 'number of pixels',
style='histogram')
return a, h
#-------------------------------------------------------------------------------
def display (im, stretch=False, program=None, wait=False, name="EVE image",
hint=False):
"""
Display an image using an external program.
Arguments:
im image to be displayed
stretch if True, displayed image will be contrast-stretched
program external program to be used for display (default: system-dependent)
      wait     when True, wait for the display program to exit before returning
      name     window title to use, where supported (default: "EVE image")
      hint     when True, print a hint about how to close the image window
               (default: False)
"""
if systype == 'Windows':
if stretch:
copy = im.copy()
contrast_stretch (copy)
else:
copy = im
output_pil (copy, '', 'display') # temporary kludge
return
# According to the website (spread over two lines of comments here):
# http://www.velocityreviews.com/forums/
# t707158-python-pil-and-vista-windows-7-show-not-working.html
# Windows Vista needs the following workaround for image display via
# Pil to work properly:
# Edit (e.g., with TextPad) the file
# C:\Python26\lib\site-packages\PIL\ImageShow.py
# Around line 99, edit the existing line to include a ping command,
# as follows (spread over two lines of comments here):
# return "start /wait %s && PING 127.0.0.1 -n 5
# > NUL && del /f %s" % (file, file)
if program is None: program = 'mspaint'
handle, fn = tempfile.mkstemp (suffix='.bmp')
output_bmp (copy, fn)
if hint: print >>sys.stderr, \
'Type "Control-q" in the image window to close it.'
line = "%s %s && ping 127.0.0.1 -n 15 > NUL && del /f %s" % \
(program, fn, fn)
if wait: line = 'start /wait ' + line
        else: line = 'start /wait ' + line # must wait on Windows, I think
os.system (line)
else:
if program is None:
if find_in_path ('xv'):
program = 'xv -name "' + name + '"'
if hint: print >>sys.stderr, \
'Type "q" in the image window to close it.'
elif find_in_path ('display'):
program = 'display'
if hint: print >>sys.stderr, \
'Type "q" in the image window to close it.'
elif systype == 'Darwin':
if stretch:
copy = im.copy()
contrast_stretch (copy)
else:
copy = im
handle, fn = tempfile.mkstemp (suffix='.png')
output_png (copy, fn)
if hint: print >>sys.stderr, \
'Type "Command-q" in the image window to close it.'
line = "%s '%s'; sleep 5; rm -f '%s'"
if not wait: line = "(" + line + ")&"
line = line % ("open -a /Applications/Preview.app", fn, fn)
os.system (line)
return
else:
raise ValueError, 'Cannot find an image display program'
handle, fn = tempfile.mkstemp ()
output_pnm (im, fn, stretch=stretch)
if wait: line = "%s %s; rm -f %s" % (program, fn, fn)
else: line = "(%s %s; rm -f %s)&" % (program, fn, fn)
os.system (line)
os.close (handle)
#-------------------------------------------------------------------------------
def draw_border (im, v=max_image_value, width=2):
"""
Draw a border around an image.
Arguments:
im image to which the border is to be added (modified)
v value to which the border is to be set (default: 255)
width width of the border in pixels (default: 2)
"""
ny, nx, nc = sizes (im)
    im[0:width,:,:] = v # top
    im[ny-width:ny,:,:] = v # bottom
    im[:,0:width,:] = v # left
    im[:,nx-width:nx,:] = v # right
#-------------------------------------------------------------------------------
def draw_box (im, ylo, xlo, yhi, xhi, border=max_image_value, fill=None):
"""
Draw a rectangular box, optionally filled.
Arguments:
im image in which the box is to be drawn (modified)
ylo y-value of the lower left corner of the box
xlo x-value of the lower left corner of the box
yhi y-value of the upper right corner of the box
xhi x-value of the upper right corner of the box
border value used for drawing the box (default: 255.0)
fill value with which the inside of the box is to be filled
(default: None, and so is unfilled)
"""
draw_line_fast (im, ylo, xlo, ylo, xhi, border)
draw_line_fast (im, ylo, xhi, yhi, xhi, border)
draw_line_fast (im, yhi, xhi, yhi, xlo, border)
draw_line_fast (im, yhi, xlo, ylo, xlo, border)
if not fill is None: set_region (im, ylo+1, xlo+1, yhi, xhi, fill)
#-------------------------------------------------------------------------------
def draw_circle (im, yc, xc, r, v, fill=None):
"""
Draw a circle with value v of radius r centred on (xc, yc) in image im.
Arguments:
im image upon which the circle is to be drawn (modified)
yc y-value (row) of the centre of the circle
xc x-value (column) of the centre of the circle
      r         radius of the circle in pixels
v value to which pixels forming the circle are set
fill value with which the inside of the circle is to be filled
(default: None, and so is unfilled)
The circle is anti-aliased, using an algorithm due to Xiaolin Wu
(published in "Graphics Gems II"). Although fairly fast, it is slower
than Bresenham's algorithm, used in the routine draw_circle_fast, and
    paints a range of values into the image; if it is important that all
    pixels of the circle have the value v, use draw_circle_fast.
The implementation was adapted from the PHP code in
http://mapidev.blogspot.com/2009/03/xiaolin-wu-circle-php-implementation.html
"""
x = xx = r
y = yy = -1
t = 0
while x > y:
y += 1
d = math.sqrt (r**2 - y**2)
opac = int (d + 0.5) - d
if opac < t: x -= 1
trans = 1.0 - opac
im[yc+y, xc+x] = v
blend_pixel (im, y + yc, x + xc - 1, v, trans)
blend_pixel (im, y + yc, x + xc + 1, v, opac)
im[yc+x, xc+y] = v
blend_pixel (im, x + yc - 1, y + xc, v, trans)
blend_pixel (im, x + yc + 1, y + xc, v, opac)
im[yc+y, xc-x] = v
blend_pixel (im, y + yc, xc - x + 1, v, trans)
blend_pixel (im, y + yc, xc - x - 1, v, opac)
im[yc+x, xc-y] = v
blend_pixel (im, x + yc - 1, xc - y, v, trans)
blend_pixel (im, x + yc + 1, xc - y, v, opac)
im[yc-y, xc+x] = v
blend_pixel (im, yc - y, x + xc - 1, v, trans)
blend_pixel (im, yc - y, x + xc + 1, v, opac)
im[yc-x, xc+y] = v
blend_pixel (im, yc - x - 1, y + xc, v, opac)
blend_pixel (im, yc - x + 1, y + xc, v, trans)
im[yc-x, xc-y] = v
blend_pixel (im, yc - x - 1, xc - y, v, opac)
blend_pixel (im, yc - x + 1, xc - y, v, trans)
im[yc-y, xc-x] = v
blend_pixel (im, yc - y, xc - x - 1, v, opac)
blend_pixel (im, yc - y, xc - x + 1, v, trans)
t = opac
    if not fill is None: fill_outline (im, yc, xc, v)
#-------------------------------------------------------------------------------
def draw_circle_fast (im, yc, xc, r, v, fill=None):
"""
Draw a circle with value v of radius r centred on (xc, yc) in image im.
Arguments:
im image upon which the circle is to be drawn (modified)
yc y-value (row) of the centre of the circle
xc x-value (column) of the centre of the circle
      r         radius of the circle in pixels
v value to which pixels forming the circle are set
fill value with which the inside of the circle is to be filled
(default: None, and so is unfilled)
"""
x = 0
y = r
p = 3 - 2 * r
ny, nx, nc = sizes (im)
while x < y:
im[yc+y,xc+x] = v
im[yc+y,xc-x] = v
im[yc-y,xc+x] = v
im[yc-y,xc-x] = v
im[yc+x,xc+y] = v
im[yc+x,xc-y] = v
im[yc-x,xc+y] = v
im[yc-x,xc-y] = v
if p < 0:
p += 4 * x + 6
else:
p += 4 * (x - y) + 6
y -= 1
x += 1
if x == y:
im[yc+y,xc+x] = v
im[yc+y,xc-x] = v
im[yc-y,xc+x] = v
im[yc-y,xc-x] = v
im[yc+x,xc+y] = v
im[yc+x,xc-y] = v
im[yc-x,xc+y] = v
im[yc-x,xc-y] = v
if not fill is None: fill_outline (im, yc, xc, v)
#-------------------------------------------------------------------------------
def draw_line (im, y0, x0, y1, x1, v):
"""
Draw a line from (x0, y0) to (x1, y1) with value v in image im.
Arguments:
im image upon which the line is to be drawn (modified)
y0 y-value (row) of the start of the line
x0 x-value (column) of the start of the line
y1 y-value (row) of the end of the line
x1 x-value (column) of the end of the line
v value to which pixels on the line are to be set
The line is anti-aliased, using an algorithm due to Xiaolin Wu ("An
Efficient Antialiasing Technique", Computer Graphics July 1991); the
code is a corrected version of that in the relevant Wikipedia entry.
The algorithm draws pairs of pixels straddling the line, coloured
according to proximity; pixels at the line ends are handled separately.
Although fairly fast, it is slower than Bresenham's algorithm and
paints a range of values into the image; if it is important that all
pixels of the line have the value v, there is a separate routine,
    draw_line_fast, which implements Bresenham's algorithm.
"""
if abs(y1 - y0) > abs(x1 - x0): steep = True
else: steep = False
if steep:
y0, x0 = x0, y0
y1, x1 = x1, y1
if x0 > x1:
x1, x0 = x0, x1
y1, y0 = y0, y1
dx = x1 - x0 + 0.0
dy = y1 - y0
if dx == 0.0: de = 1.0e30
else: de = dy / dx
# Handle the first end-point.
xend = int (x0 + 0.5)
yend = y0 + de * (xend - x0)
xgap = 1.0 - (x0 + 0.5 - int(x0 + 0.5))
xpxl1 = int (xend) # this will be used in the main loop
ypxl1 = int (yend)
if steep:
blend_pixel (im, xpxl1, ypxl1, v, 1.0 - (yend - int(yend)))
blend_pixel (im, xpxl1, ypxl1+1, v, yend - int(yend))
else:
blend_pixel (im, ypxl1, xpxl1, v, 1.0 - (yend - int(yend)))
blend_pixel (im, ypxl1+1, xpxl1, v, yend - int(yend))
intery = yend + de # first y-intersection for the main loop
# Handle the second end-point.
xend = int (x1 + 0.5)
yend = y1 + de * (xend - x1)
xgap = x1 + 0.5 - int(x1 + 0.5)
xpxl2 = int (xend) # this will be used in the main loop
ypxl2 = int (yend)
if steep:
blend_pixel (im, xpxl2, ypxl2, v, 1.0 - (yend - int (yend)))
blend_pixel (im, xpxl2, ypxl2+1, v, yend - int(yend))
else:
blend_pixel (im, ypxl2, xpxl2, v, 1.0 - (yend - int (yend)))
blend_pixel (im, ypxl2+1, xpxl2, v, yend - int(yend))
# The main loop.
for x in xrange (xpxl1+1, xpxl2):
if steep:
blend_pixel (im, x, int (intery), v,
math.sqrt(1.0 - (intery - int(intery))))
blend_pixel (im, x, int (intery)+1, v,
math.sqrt (intery - int(intery)))
else:
blend_pixel (im, int (intery), x, v,
math.sqrt(1.0 - (intery - int(intery))))
blend_pixel (im, int (intery)+1, x, v,
math.sqrt(intery - int(intery)))
intery += de
#-------------------------------------------------------------------------------
def draw_line_fast (im, y0, x0, y1, x1, v):
"""
Draw a line from (x0, y0) to (x1, y1) with value v in image im.
Arguments:
im image upon which the line is to be drawn (modified)
y0 y-value (row) of the start of the line
x0 x-value (column) of the start of the line
y1 y-value (row) of the end of the line
x1 x-value (column) of the end of the line
v value to which pixels on the line are to be set
    This routine uses the classic line-drawing algorithm due to Bresenham, which
aliases badly for most lines; if appearance is more important than
speed, there is a separate EVE routine that implements anti-aliased
line-drawing using an algorithm due to Xiaolin Wu.
"""
ny, nx, nc = sizes (im)
y0 = int (y0)
x0 = int (x0)
y1 = int (y1)
x1 = int (x1)
if abs(y1 - y0) > abs(x1 - x0): steep = True
else: steep = False
if steep:
y0, x0 = x0, y0
y1, x1 = x1, y1
if x0 > x1:
x1, x0 = x0, x1
y1, y0 = y0, y1
dx = x1 - x0 + 0.0
dy = abs(y1 - y0)
e = 0.0
if dx == 0.0: de = 1.0e30
else: de = dy / dx
y = y0
if y0 < y1: ystep = 1
else: ystep = -1
for x in xrange (x0,x1+1):
if steep:
if x >= 0 and x < ny and y >= 0 and y < nx: im[x,y,:] = v
else:
if x >= 0 and x < nx and y >= 0 and y < ny: im[y,x,:] = v
e += de
if e >= 0.5:
y += ystep
e -= 1.0
#-------------------------------------------------------------------------------
def draw_polygon (im, yc, xc, r, nsides, v=max_image_value, fast=False,
fill=False):
"""
Draw an nsides-sided polygon of radius r centred at (yc, xc), returning
a list of its vertices.
Arguments:
      im      image upon which the polygon is to be drawn (modified)
      yc      y-value (row) of the centre of the polygon
      xc      x-value (column) of the centre of the polygon
      r       radius of the circle in which the polygon is enclosed
      nsides  number of sides that the polygon is to have
      v       value to which pixels are to be set (default: 255.0)
      fast    if True, don't use anti-aliased lines (default: False)
      fill    if True, fill the polygon with value v (default: False)
"""
angle = 2.0 * math.pi / nsides
y0 = yc
x0 = xc + r
vertices = []
for i in xrange (1, nsides+1):
vertices.append ((y0, x0))
y1 = yc + r * math.sin (i * angle)
x1 = xc + r * math.cos (i * angle)
if fast: draw_line_fast (im, y0, x0, y1, x1, v)
else: draw_line (im, y0, x0, y1, x1, v)
y0 = y1
x0 = x1
if fill: fill_outline (im, yc, xc, v)
return vertices
#-------------------------------------------------------------------------------
def draw_star (im, yc, xc, radius, npoints, inner_radius=None,
v=max_image_value, fast=False, fill=False):
"""
Draw an npoints-pointed star of radius r centred at (yc, xc).
Arguments:
      im            image upon which the star is to be drawn (modified)
      yc            y-value (row) of the centre of the star
      xc            x-value (column) of the centre of the star
      radius        radius of the circle in which the star is enclosed
      npoints       number of points that the star is to have
      inner_radius  radius of the inner vertices of the star
                    (default: radius / 2)
      v             value to which pixels are to be set (default: 255.0)
      fast          if True, don't use anti-aliased lines (default: False)
      fill          if True, fill the star with value v (default: False)
"""
angle = math.pi / npoints
if inner_radius is None: inner_radius = radius / 2
y0 = yc
x0 = xc + radius
np = 2 * npoints + 1
vertices = []
for i in xrange (1, np):
vertices.append ((y0, x0))
if (i // 2) * 2 == i: r = radius
else: r = inner_radius
y1 = yc + r * math.sin (i * angle)
x1 = xc + r * math.cos (i * angle)
if fast: draw_line_fast (im, y0, x0, y1, x1, v)
else: draw_line (im, y0, x0, y1, x1, v)
y0 = y1
x0 = x1
if fill: fill_outline (im, yc, xc, v)
return vertices
#-------------------------------------------------------------------------------
def draw_text (im, text, y, x, v=max_image_value, align="c"):
"""
Write text onto an image.
Arguments:
im image upon which the text is to be written (modified)
text string of characters to be written onto the image
y y-value (row) at which the text is to be written
x x-value (column) at which the text is to be written
v value to which pixels in the text are to be set (default: 255.0)
align alignment of the text, one of (default: 'c')
'c': centred
'l': left-justified
'r': right-justified
This routine is based on C code kindly provided by Nick Glazzard of
Speedsix.
"""
global character_height, character_width, character_bitmap
# Work out the start position on the image depending on the text alignment.
if align == 'l' or align == 'L':
offset = 0
elif align == 'r' or align == 'R':
offset = - character_width * len(text)
else:
offset = - character_width * len(text) // 2
# Draw each character in turn.
ny, nx, nc = sizes (im)
for c in text:
for row in xrange(character_height-1,-1,-1):
yy = y-row
if yy >= 0 and yy < ny:
b = character_bitmap[c][row]
for col in xrange (character_width-1,-1,-1):
if b & (1<<col):
xx = x+(7-col)+offset
if xx >= 0 and xx < nx:
im[yy,xx,:] = v
offset += character_width
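# Usage sketch (illustrative): build a simple test card with the drawing
# routines above; image() and set() are defined elsewhere in this module.
#     canvas = image ((256, 256, 1))
#     set (canvas, 0.0)
#     draw_circle (canvas, 128, 128, 60, max_image_value)
#     draw_text (canvas, "EVE", 200, 128, align='c')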
#-------------------------------------------------------------------------------
def examine (im, name="", format="%3.0f", lformat=None, ff=False, fd=sys.stdout,
ylo=0, xlo=0, yhi=None, xhi=None, clo=0, chi=None):
"""
Output an image in human-readable form.
Arguments:
im image whose pixels are to be output
name name of the image (default : '')
format format used for writing pixels (default: '%3.0f')
lformat format used for column and row numbers (default: contextual)
ff if True, output a form-feed before the output (default: False)
fd file on which output is to be written (default: sys.stdout)
ylo lower y-value of the region to be output (default: 0)
xlo lower x-value of the region to be output (default: 0)
yhi upper y-value of the region to be output (default: last pixel)
xhi upper x-value of the region to be output (default: last pixel)
clo first channel of the region to be output (default: 0)
chi last channel of the region to be output (default: last channel)
"""
    # Work out the width of a printed pixel by formatting the value 0. We use
    # that to determine lformat, unless set explicitly by the caller.
width = len (format % 0.0)
if lformat is None: lformat = "%%%dd" % width
# Print the introduction.
ny, nx, nc = sizes (im)
py = px = pc = "s"
if ny == 1: py = ""
if nx == 1: px = ""
if nc == 1: pc = ""
    if ff: ffc = '\f'
    else: ffc = ''
if name != "": name += " "
print >>fd, ffc + \
("Image %sis %d line%s, %d pixel%s/line, %d channel%s ") \
% (name, ny, py, nx, px, nc, pc)
# Print the element numbers across the top and a line.
if yhi is None: yhi = ny
if xhi is None: xhi = nx
if chi is None: chi = nc
nyl = yhi - ylo
nxl = xhi - xlo
sep = ' ' * width + '+' + '-' * (width+1) * nxl + '-+'
print >>fd, ' ' * (width+1),
for x in xrange (xlo, xhi):
print >>fd, lformat % (x),
print >>fd, ""
print >>fd, sep
# Print the pixels of the rows, with the channels above each other.
for y in xrange (ylo, yhi):
for c in xrange (clo, chi):
if c == clo:
print >>fd, lformat % (y) + '|',
else:
print >>fd, len (lformat % (y)) * ' ' + '|',
for x in xrange (xlo, xhi):
print >>fd, format % (im[y,x,c]),
print >>fd, "|"
print >>fd, sep
#-------------------------------------------------------------------------------
def effect_drawing (im, blursize=17, opacity=0.9):
"""
Convert an image into a 'drawing' and return it.
Arguments:
im image to be converted into a 'drawing'
      blursize  size of the square mask to be used for blurring the image
(default: 17)
opacity the opacity to be used when blending the blur with
the original (default: 0.9)
"""
ny, nx, nc = sizes (im)
if nc > 1: im1 = mono (im)
else: im1 = copy (im)
# Invert the contrast.
hi = max (im1)
im2 = hi - im1
# Blur the image.
blurmask = image ((blursize, blursize, 1))
set (blurmask, 1.0)
    im2 = convolve (im2, blurmask, 'mean') # convolve returns the result
# Blend the layers, clipping the result to keep it sensible.
fac = opacity / max (im2)
im2 = im1 / (1.0 - im2 * fac)
clip (im2, 0.0, max_image_value)
return im2
#-------------------------------------------------------------------------------
def effect_sepia (im):
"""
Make an image have a sepia appearance.
Arguments:
im image to be made sepia (modified)
"""
r = get_channel (im, 0)
g = get_channel (im, 1)
b = get_channel (im, 2)
rr = 0.393 * r + 0.769 * g + 0.189 * b
gg = 0.349 * r + 0.686 * g + 0.168 * b
bb = 0.272 * r + 0.534 * g + 0.131 * b
set_channel (im, 0, rr)
set_channel (im, 1, gg)
set_channel (im, 2, bb)
clip (im, 0.0, max_image_value)
#-------------------------------------------------------------------------------
def effect_streaks (im, width=1, height=6, direction='h', occ=0.9, fg=0.0,
bg=max_image_value):
"""
Return a representation of an image as horizontal or vertical streaks.
Convert an image into a series of two-level streaks, the width of the
streak indicating the darkness of that part of the original image. The
inspiration for the routine is the illustrations in the book 'The
Cloudspotter's Guide' by Gavin Pretor-Pinny.
Arguments:
im image to be processed
width width of each region to be processed (default: 1)
      height      height of each region to be processed (default: 6)
      direction   direction in which the streaks will go (default: 'h')
                  'h': horizontal
                  'v': vertical
      occ         maximum occupancy of each region (default: 0.9)
      fg          foreground value (default: 0.0)
      bg          background value (default: 255.0)
"""
# Produce single-channel output, even from a multi-channel input image.
ny, nx, nc = sizes (im)
to = image ((ny, nx, 1))
# Get the image range and handle the case when the image is blank.
lo, hi = extrema (im)
if lo >= hi:
set (to, 0.0)
return to
# Process the image in regions of width x height pixels. For each region,
# we calculate its mean, then determine what proportion of that region
# should be filled with the foreground value. We then set the entire
# region to the background value, and finally fill the relevant proportion
# of the region with the foreground value.
# There are two aesthetic refinements to this process. The first is that
# we incorporate a contrast reversal if the foreground value is darker
# than the background one. The second is that we reduce the proportion of
# the region that is filled by an occupancy factor as this makes the end
# result look better.
if direction == 'v' or direction == 'V':
horiz = False
fac = width * occ / (hi - lo)
else:
horiz = True
fac = height * occ / (hi - lo)
for y in xrange (0, ny, height):
yto = y + height
if yto > ny: yto = ny
for x in xrange (0, nx, width):
xto = x + width
if xto > nx: xto = nx
reg = region (im, y, yto, x, xto)
lo, hi = extrema (reg)
if bg > fg:
val = int((hi - mean(reg)) * fac + 0.5)
else:
val = int((mean (reg) - lo) * fac + 0.5)
if val < 0: val = 0
set_region (to, y, x, yto, xto, bg)
if horiz:
set_region (to, y, x, y + val, xto, fg)
else:
set_region (to, y, x, yto, x + val, fg)
return to
#-------------------------------------------------------------------------------
def effect_solarize (im, threshold=None):
"""
Solarize an image. (UNTESTED)
Arguments:
im image to be solarized
      threshold   threshold below which pixel values are inverted
                  (default: half the maximum value in the image)
"""
value = max (im)
if threshold is None: threshold = value / 2.0
ny, nx, nc = sizes (im)
for y in xrange (0, ny):
for x in xrange (0, nx):
for c in xrange (0, nc):
if im[y,x,c] < threshold: im[y,x,c] = value - im[y,x,c]
#-------------------------------------------------------------------------------
def extract (im, ry, rx, yc, xc, step=1.0, angle=0.0, wrap=False, val=0,
interpolator='gradient'):
"""
Return a ry x rx-pixel region of im centred at (yc, xc).
Arguments:
im image from which the region is to be extracted
ry number of pixels in the y-direction of the extracted region
rx number of pixels in the x-direction of the extracted region
yc y-position of the centre of the region to be extracted
xc x-position of the centre of the region to be extracted
step step size on im (default: 1.0)
or a list [ystep, xstep]
angle angle of sampling grid relative to im, measured anticlockwise
in radian (default: 0.0)
      wrap          if True, 'falling off' one side of the image will wrap
                    around to the opposite side (otherwise, such pixels are
                    set to val) (default: False)
      val           value to which pixels outside the image are set if not
                    wrapping (default: 0)
      interpolator  interpolation scheme, one of 'gradient', 'bilinear' or
                    'nearest' (default: 'gradient')
The region extracted from im can be centred around a non-integer
position in im, and extracted with an arbitrary step size at an
arbitrary angle. The interpolation schemes supported are either
conventional bilinear or a gradient-based one described in
P.R. Smith (Ultramicroscopy vol 6, pp 201--204, 1981), as well as
simple nearest neighbour.
"""
if isinstance (step, list):
ystep, xstep = step
else:
ystep = xstep = step
ny, nx, nc = sizes (im)
region = image ((ry, rx, nc))
y0 = yc - ry / 2
x0 = xc - rx / 2
if abs (angle) < tiny and \
abs (ystep - 1.0) < tiny and abs (xstep - 1.0) < tiny and \
ry < ny and rx < nx and y0 >= 0 and x0 >= 0 and \
abs (y0 - int(y0)) < tiny and abs (x0 - int(x0)) < tiny:
        region = im[int(y0):int(y0)+ry, int(x0):int(x0)+rx]
else:
mim = im
if interpolator == 'gradient':
interp = 2
mim = mono (im)
elif interpolator == 'bilinear': interp = 3
elif interpolator == 'nearest': interp = 1
else:
print >>sys.stderr, ('extract: invalid interpolator "%s"; ' + \
'using "nearest"') % interpolator
interp = 1
cosfac = math.cos (-angle)
sinfac = math.sin (-angle)
disty = ry / 2
distx = rx / 2
yst = yc + distx * xstep * sinfac - disty * ystep * cosfac
xst = xc - distx * xstep * cosfac - disty * ystep * sinfac
for y in xrange (0, ry):
ypos = yst
xpos = xst
for x in xrange (0, rx):
if (ypos < 0 or ypos >= ny or xpos < 0 or xpos >= nx) \
and not wrap:
region[y,x] = val
else:
ylo = int (ypos)
dy = ypos - ylo
dy1 = 1 - dy
ylo = (ylo + ny) % ny
yhi = ylo + 1
if yhi >= ny and not wrap:
region[y,x] = val
else:
yhi = (yhi + ny) % ny
xlo = int (xpos)
dx = xpos - xlo
dx1 = 1 - dx
xlo = (xlo + nx) % nx
xhi = xlo + 1
if xhi >= nx and not wrap:
region[y,x] = val
else:
xhi = (xhi + nx) % nx
if interp == 3:
region[y,x] = \
dy *dx*im[yhi,xhi] + dy *dx1*im[yhi,xlo] + \
dy1*dx*im[ylo,xhi] + dy1*dx1*im[ylo,xlo]
elif interp == 2:
if abs (mim[ylo,xlo] - mim[yhi,xhi]) > \
abs (mim[yhi,xlo] - mim[ylo,xhi]):
region[y,x] = (dx-dy) * im[ylo,xhi] + \
dx1*im[ylo,xlo] + dy*im[yhi,xhi]
else:
region[y,x] = (dx1-dy) * im[ylo,xlo] + \
dx*im[ylo,xhi] + dy*im[yhi,xlo]
else:
region[y,x] = im[int(ylo+0.5),int(xlo+0.5)]
ypos -= ystep * sinfac
xpos += xstep * cosfac
yst += ystep * cosfac
xst += xstep * sinfac
return region
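# Usage sketch (illustrative): pull out a 64x64 patch centred at a
# non-integer position, rotated by 30 degrees, using the default
# gradient-based interpolation.
#     patch = extract (im, 64, 64, yc=120.5, xc=200.25, angle=math.pi/6)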
#-------------------------------------------------------------------------------
def extrema (im):
"""
Return the minimum and maximum of an image.
Arguments:
im image whose extrema are to be found
"""
return [im.min(), im.max()]
#-------------------------------------------------------------------------------
def fill_outline (im, y, x, v=max_image_value, threshold=100):
"""
Flood fill the region lying within a border.
Arguments:
im image containing region to be filled (modified)
      y         y-coordinate of point at which filling is to start
      x         x-coordinate of point at which filling is to start
v value to which the filled region is to be set (default: 255)
threshold minimum difference in value from centre pixel at boundary
(default: 100)
This code is based on that written by Eric S. Raymond at
http://mail.python.org/pipermail/image-sig/2005-September/003559.html
This code is an elegant Python implementation of Paul Heckbert's
    classic flood-fill algorithm, presented in "Graphics Gems".
"""
ny, nx, nc = sizes (im)
if x < 0 or x >= nx or y < 0 or y >= ny: return
vc = im[y,x].sum()
if abs(vc - v) < threshold: return
im[y,x] = v
# At each step there is a list of edge pixels for the flood-filled
# region. Check every pixel adjacent to the edge; for each, if it is
# eligible to be coloured, colour it and add it to a new edge list.
# Then you replace the old edge list with the new one. Stop when the
# list is empty.
edge = [(y, x)]
while edge:
newedge = []
for (y, x) in edge:
for (t, s) in ((y, x+1), (y, x-1), (y+1, x), (y-1, x)):
if s >= 0 and s < nx and t >= 0 and t < ny and \
abs(im[t,s].sum() - vc) < threshold:
im[t,s] = v
newedge.append ((t, s))
edge = newedge
#-------------------------------------------------------------------------------
def find_in_path (prog):
"""
Return the absolute pathname of a program which is in the search path.
Arguments:
prog program whose absolute filename is to be found
"""
# First, split the PATH variable into a list of directories, then find
# the first program from our list that is in the path.
path = string.split(os.environ['PATH'], os.pathsep)
for p in path:
fp = os.path.join(p, prog)
if os.path.exists(fp): return os.path.abspath(fp)
return None
#-------------------------------------------------------------------------------
def find_peaks (im, threshold):
"""
Return a list of the peaks in an image in descending order of height.
A peak is defined as a pixel whose value is larger than those of all
surrounding pixels and has a value greater than threshold. Each
peak is described by a three-element list containing its pixel value
and the y- and x-values at which the peak was found.
Arguments:
im image whose peaks are to be found
threshold value used for determining which peaks are significant
"""
ny, nx, nc = sizes (im)
peaks = list ()
for y in xrange (1, ny-1):
for x in xrange (1, nx-1):
if im[y,x,0] > im[y-1,x-1,0] \
and im[y,x,0] > im[y-1,x ,0] \
and im[y,x,0] > im[y-1,x+1,0] \
and im[y,x,0] > im[y ,x-1,0] \
and im[y,x,0] > im[y ,x+1,0] \
and im[y,x,0] > im[y+1,x-1,0] \
and im[y,x,0] > im[y+1,x ,0] \
and im[y,x,0] > im[y+1,x+1,0] \
and im[y,x,0] > threshold:
peaks.append ([im[y,x,0], y, x])
# Return the peaks sorted into descending order.
peaks.sort (reverse=True)
return peaks
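# Example usage (a sketch; the accumulator returned by hough_line is a
# typical input):
#     peaks = find_peaks (acc, threshold=10)
#     print_peaks (peaks, intro='Peaks found:')
#     strong = high_peaks (peaks, factor=0.5)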
#-------------------------------------------------------------------------------
def find_skin (im, hlo=300, hhi=30, slo=10, shi=70, vlo=10, vhi=80, ishsv=False):
"""
Return a binary mask identifying pixels that are likely to be skin.
This routine identifies potential skin pixels in the image im. The
image is converted to HSV format unless ishsv is True, and then
pixels in the HSV region bounded by [hlo:hhi], [slo:shi] and
[vlo:vhi] are identified as being skin. As skin is vaguely red, and
red in HSV space is 0, hlo will normally be about 330 (degrees) and
hhi about 30 degrees. The returned image has non-zero pixels where
skin has been identified.
    Note that this is not a reliable skin detector: it can be confused
by incandescent lighting or by similarly-coloured materials such as
wood.
Arguments:
im image in which skin regions are to be found
hlo lowest skin hue (default: 300)
hhi highest skin hue (default: 30)
slo lowest skin saturation (default: 10)
shi highest skin saturation (default: 70)
vlo lowest skin value (default: 10)
vhi highest skin value (default: 80)
ishsv if True, the input image contains pixels in HSV format
rather than RGB (default: False)
"""
return segment_hsv (im, hlo, hhi, slo, shi, vlo, vhi, ishsv)
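# Example usage (a sketch; 'portrait.png' is illustrative):
#     im = image ('portrait.png')
#     mask = find_skin (im)
#     display (mask)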
#-------------------------------------------------------------------------------
def find_threshold_otsu (im):
"""
Return the optimal image threshold, found using Otsu's method.
This routine is minimally adapted from the code in 'ImageP.py' by
Tamas Haraszti, which he says is ultimately derived from Octave code
written by Barre-Piquot. I note, in passing, that this facility is
also available as part of Matlab, though I've never seen the code.
The algorithm itself is N. Otsu: 'A Threshold Selection Method from
Gray-Level Histograms', IEEE Transactions on Systems, Man and
Cybernetics vol 9 no 1 pp 62-66 (1979).
Arguments:
im image for which the threshold is to be found
"""
vals = im.copy ()
mn = vals.min ()
vals = vals - mn
N = int (vals.max ())
h, x = numpy.histogram (vals, bins=N)
h = h / (h.sum() + 1.0)
w = h.cumsum ()
i = numpy.arange (N, dtype=float) + 1.0
    mu = (h*i).cumsum()
w1 = 1.0 - w
mu0 = mu / w
mu1 = (mu[-1] - mu) / w1
s = w * w1 * (mu1 - mu0)**2
return float ((s == s.max()).nonzero()[0][0]) + mn
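# Example usage (a sketch): binarize a monochrome image at the threshold
# chosen by Otsu's method:
#     mim = mono (im)
#     t = find_threshold_otsu (mim)
#     mask = image (mim)
#     mask[numpy.where (mim > t)] = max_image_value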
#-------------------------------------------------------------------------------
def fourier (im, forward=True):
"""
Perform a Fourier transform of an image.
Note that this routine leaves the zero frequency in the centre of
the image.
Arguments:
im image to be transformed (modified)
      forward   if True, perform a forward transform (default: True)
"""
if forward:
# Transform, then move the origin to the centre of the image.
temp = numpy.fft.fft2 (im, axes=(-3,-2))
res = numpy.fft.fftshift (temp, axes=(-3,-2))
else:
# Move the origin from the centre to the corner, then transform.
temp = numpy.fft.ifftshift (im, axes=(-3,-2))
res = numpy.fft.ifft2 (temp, axes=(-3,-2))
return res
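# Example usage (a sketch): form and display the log power spectrum of an
# image using other routines from this module:
#     ft = fourier (im)
#     ps = log1 (modulus_squared (ft))
#     display (ps)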
#-------------------------------------------------------------------------------
def get_channel (im, c):
"""
Return a channel of an image.
Arguments:
im the image from which the channel is to be extracted
c the index of the channel that is to be extracted
"""
ny, nx, nc = sizes (im)
ch = image ((ny, nx, 1))
ch[:,:,0] = im[:,:,c]
return ch
#-------------------------------------------------------------------------------
def grab (prog=None, suffix=''):
"""
    Return an image grabbed using the computer's camera.
Arguments:
prog name of the program with which to capture the image; by default
this is one of:
isightcapture (MacOS X)
streamer (Linux)
      suffix  the suffix of the temporary file in which the image is captured
(by default, this depends on the capture program used)
"""
if prog is None:
if find_in_path ('isightcapture'):
fn = tempfile.mkstemp (suffix='.png')[1]
os.system ('isightcapture -t png ' + fn)
        elif find_in_path ('streamer'):
            fn = tempfile.mkstemp (suffix='.ppm')[1]
            os.system ('streamer -q -f ppm -o ' + fn)
        else:
            raise ValueError, 'No supported capture program was found'
else:
fn = tempfile.mkstemp (suffix=suffix)[1]
os.system (prog + ' ' + fn)
pic = image (fn)
os.remove (fn)
return pic
#-------------------------------------------------------------------------------
def graph_gnuplot (x, y, title=' ', xlabel='x', ylabel='y',
logx=False, logy=False,
style='lines', key=None, pause=True):
"""
Graph data using Gnuplot.
Arguments:
x a list of values to form the abscissa
y either a list of values to be plotted on the ordinate axis
or a list of lists of values to be plotted as a series of
separate curves
title the title of the graph
xlabel the text used to annotate the abscissa
ylabel the text used to annotate the ordinate
logx if True, make the x-axis logarithmic (default: False)
logy if True, make the y-axis logarithmic (default: False)
style the method used to plot the data, a valid Gnuplot line-type
or 'histogram' (default: 'lines')
key if supplied, a list of the same length as the number of plots
giving the name for each curve (default: None)
pause if True, allow the user to view the plot (and optionally save
the data to file) before continuing (default: True)
"""
# Work out if we're producing one or several plots.
if isinstance (y, list): nydims = 1
else: nydims = len (y.shape)
if nydims == 1:
ny = len (y)
else:
nydims = y.shape[0]
ny = len (y[0])
if x is None:
x = numpy.ndarray ((ny))
for ix in xrange (0, ny):
x[ix] = ix
p = os.popen ('gnuplot', 'w')
if key is None: print >>p, 'set nokey'
print >>p, 'set grid'
print >>p, 'set title "' + title + '"'
print >>p, 'set xlabel "' + xlabel + '"'
print >>p, 'set ylabel "' + ylabel + '"'
if logx: print >>p, 'set log x'
if logy: print >>p, 'set log y'
if style == 'histogram':
print >>p, 'set style fill solid'
extra = 'with boxes'
else:
print >>p, 'set style data ' + style
extra = ''
print >>p, 'plot',
for dataset in xrange (0, nydims-1):
print >>p, '"-"',
if not key is None: print >>p, ('title "%s"' % key[dataset]),
print >>p, ',',
print >>p, '"-"',
    if not key is None: print >>p, ('title "%s"' % key[nydims-1]),
print >>p, extra
# We access the contents of y differently if we're producing a single plot
    # or a set of plots.
if nydims == 1:
for ix, iy in zip (x, y):
print >>p, ix, iy
print >>p, 'e'
else:
for dataset in xrange (0, nydims):
for ix, iy in zip (x, y[dataset,:]):
print >>p, ix, iy
print >>p, 'e'
p.flush ()
# Exit if the user types <EOF>; give (minimal) instructions if they type
# "?"; simply continue if they type <return>. Anything else typed in
# response to the prompt is assumed to be a filename and we save the data
    # to that file. (And yes, that can result in silly filenames...)
if pause: looping = True
else: looping = False
while looping:
sys.stderr.write ('CR> ')
fn = sys.stdin.readline()
if len(fn) < 1:
print >>sys.stderr, "Exiting..."
sys.exit (1)
if len(fn) > 0 and fn == '?\n':
print >>sys.stderr, 'fn to save data to "fn" else <return>'
continue
if len(fn) > 1 and fn != '':
f = open (fn[:-1],'w')
for ix, iy in zip(x, y):
print >>f, ix, iy
f.close ()
looping = False
p.close ()
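# Example usage (a sketch; requires gnuplot to be on the search path):
#     xs = [0.1 * i for i in xrange (100)]
#     ys = [math.sin (v) for v in xs]
#     graph_gnuplot (xs, ys, title='Sine curve', ylabel='sin(x)', pause=False)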
#-------------------------------------------------------------------------------
def graph_pgfplots (x, y, fn, title=' ', xlabel='x', ylabel='y', logx=False,
logy=False, style='lines', key=None, preamble=True):
"""
    Graph data using LaTeX's pgfplots package.
Arguments:
x a list of values to form the abscissa
y either a list of values to be plotted on the ordinate axis
or a list of lists of values to be plotted as a series of
separate curves
fn the name of the file to receive the LaTeX commands for the plot
title the title of the graph
xlabel the text used to annotate the abscissa
ylabel the text used to annotate the ordinate
logx if True, make the x-axis logarithmic (default: False)
logy if True, make the y-axis logarithmic (default: False)
style the method used to plot the data, a valid Gnuplot line-type
or 'histogram' (default: 'lines')
key if supplied, a list of the same length as the number of plots
giving the name for each curve (default: None)
preamble if True, write out the document preamble at the top of the file
"""
# Preparation for plotting.
if isinstance (y, list): nydims = 1
else: nydims = len (y.shape)
if nydims == 1:
ny = len (y)
else:
nydims = y.shape[0]
ny = len (y[0])
if x is None:
x = numpy.ndarray ((ny))
for ix in xrange (0, ny):
x[ix] = ix
# Open the file and write out the preamble.
f = open (fn, "w")
if preamble: print >>f, r"""
%\usepackage{pgfplots} % <-- in the document preamble
\pgfplotsset{compat=newest}
\pgfplotsset{eve/.style={
y tick label style={
/pgf/number format/.cd,
fixed,
fixed zerofill,
precision=1,
/tikz/.cd
},
x tick label style={
/pgf/number format/.cd,
fixed,
fixed zerofill,
precision=1,
/tikz/.cd
},
tick label style = {font=\sffamily\small},
every axis label = {font=\sffamily\small},
legend style = {font=\sffamily},
label style = {font=\sffamily\small}}
}"""
# Work out the line style.
if style == "lines": mark = "none"
else: mark = "*"
# Work out the axis type and write out the beginning of the plot.
if logx and logy: axis = "loglogaxis"
elif logx: axis = "semilogxaxis"
elif logx: axis = "semilogyaxis"
else: axis = "axis"
print >>f, r"\begin{figure}"
print >>f, r" \begin{center}"
print >>f, r" \begin{tikzpicture}"
print >>f, r" \begin{%s}[eve, xlabel=%s, ylabel=%s," % \
(axis, xlabel, ylabel)
print >>f, r" width=0.8\textwidth, height=0.45\textheight]"
print >>f, r" \addplot[mark=%s] coordinates {" % mark
# Write out the data. We access the contents of y differently if
# we're producing a single plot of a set of plots.
if nydims == 1:
for ix, iy in zip (x, y):
print >>f, ' (%f, %f)' % (ix, iy)
print >>f, ' };'
else:
for dataset in xrange (0, nydims):
for ix, iy in zip (x, y[dataset,:]):
print >>f, ' (%f, %f)' % (ix, iy)
print >>f, ' };'
# Finish the plot off.
print >>f, r""" \end{%s}""" % axis
print >>f, r""" \end{tikzpicture}
\end{center}
\caption{%s}
\label{fig:%s}
\end{figure}""" % (title, title)
f.close()
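# Example usage (a sketch): write LaTeX to 'plot.tex', which can then be
# \input into a document whose preamble loads the pgfplots package:
#     graph_pgfplots (xs, ys, 'plot.tex', title='Sine curve', preamble=False)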
#-------------------------------------------------------------------------------
def graph (x, y, title=' ', xlabel='x', ylabel='y', logx=False, logy=False,
style='lines', key=None, pause=True):
"""
Graph data using Matplotlib.
Arguments:
x a list of values to form the abscissa
y either a list of values to be plotted on the ordinate axis
or a list of lists of values to be plotted as a series of
separate curves
title the title of the graph
xlabel the text used to annotate the abscissa
ylabel the text used to annotate the ordinate
logx if True, make the x-axis logarithmic (default: False)
logy if True, make the y-axis logarithmic (default: False)
style the method used to plot the data, a valid Gnuplot line-type
or 'histogram' (default: 'lines')
key if supplied, a list of the same length as the number of plots
giving the name for each curve (default: None)
pause if True, allow the user to view the plot (and optionally save
the data to file) before continuing (default: True)
"""
import pylab as p
# Work out if we're producing one or several plots.
if isinstance (y, list): nydims = 1
else: nydims = len (y.shape)
if nydims == 1:
ny = len (y)
else:
nydims = y.shape[0]
ny = len (y[0])
    # Make sure we have some x-values to plot.
if x is None:
x = numpy.ndarray ((ny))
for ix in xrange (0, ny):
x[ix] = ix
# Set up pylab.
p.figure ()
p.grid ()
p.title (title)
p.xlabel (xlabel)
    p.ylabel (ylabel)
    if logx: p.xscale ('log')
    if logy: p.yscale ('log')
# We access the contents of y differently if we're producing a single plot
    # or a set of plots.
if nydims == 1:
if style == 'histogram': p.bar (x, y, align='center')
else: p.plot (x, y)
else:
lab = None
for dataset in xrange (0, nydims):
if not key is None: lab = key[dataset]
if style == 'histogram':
p.bar (x, y[dataset,:], label=lab, align='center')
else:
p.plot (x, y[dataset,:], label=lab)
if not key is None: p.legend ()
p.show ()
# Exit if the user types <EOF>; give (minimal) instructions if they type
# "?"; simply continue if they type <return>. Anything else typed in
# response to the prompt is assumed to be a filename and we save the data
    # to that file. (And yes, that can result in silly filenames...)
if pause: looping = True
else: looping = False
while looping:
sys.stderr.write ('CR> ')
fn = sys.stdin.readline()
if len(fn) < 1:
print >>sys.stderr, "Exiting..."
sys.exit (1)
if len(fn) > 0 and fn == '?\n':
print >>sys.stderr, 'fn to save data to "fn" else <return>'
continue
if len(fn) > 1 and fn != '':
f = open (fn[:-1],'w')
for ix, iy in zip(x, y):
print >>f, ix, iy
f.close ()
looping = False
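# Example usage (a sketch; ys1 and ys2 are assumed to be equal-length
# sequences of ordinates):
#     curves = numpy.vstack ((ys1, ys2))
#     graph (xs, curves, title='Comparison', key=['first', 'second'],
#            pause=False)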
#-------------------------------------------------------------------------------
def harris (im, min_distance=10, threshold=0.1, inc=2, disp=False):
'''
Return corners found in an image using the Harris-Stephens detector.
Note that this routine is currently much too naive to be used in anger!
Arguments:
im image to be processed
min_distance minimum number of pixels separating corners and
image boundary (default: 10)
threshold minimum response for a pixel to be considered a corner
(default: 0.1)
inc the increment between pixels when sub-sampling (default: 2)
disp if set, display the corners on a darkened copy of the image
'''
full_corners = harris_corners (im)
full_corners.sort ()
im2 = subsample (im, inc)
half_corners = harris_corners (im2)
half_corners.sort ()
corners = []
for i in xrange (0, len(full_corners)):
fy, fx = full_corners[i]
hy, hx = half_corners[i]
y = fy + (fy - inc * hy)
x = fx + (fx - inc * hx)
corners.append ((y,x))
# Display the corners we've found, if the caller wants to.
if disp:
mim = copy (im)
mim *= 0.4
mark_positions (mim, corners, disp=True)
return corners
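# Example usage (a sketch; 'building.png' is illustrative):
#     im = image ('building.png')
#     corners = harris (im, disp=True)
#     print_positions (corners, intro='Corners found:')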
#-------------------------------------------------------------------------------
def harris_corners (im, min_distance=10, threshold=0.1):
'''
Detect corners in an image using the Harris-Stephens detector.
Note that this routine returns corners with a systematic error
inherent in the detector; a wrapper routine, harris, processes the
image at two scales to remove the bias.
This routine is adapted from code written by Jan Erik Solem; see
http://www.janeriksolem.net/2009/01/harris-corner-detector-in-python.html
Arguments:
im image to be processed
min_distance minimum number of pixels separating corners and
image boundary (default: 10)
threshold minimum response for a pixel to be considered a corner
(default: 0.1)
'''
import scipy
from scipy import signal
# Ensure the image is monochrome.
ny, nx, nc = sizes (im)
if nc > 1: mim = mono (im)
else: mim = copy (im)
# Calculate the Gaussian kernel and its derivatives. Using them, compute
# the components of the structure tensor, and from them calculate the
# determinant and trace; the ratio of these gives the response. We
# add a tiny amount in the final expression to avoid problems in
# homogeneous regions in the subsequent division. I think Alison Noble
    # was the first person to use this particular work-around; see her DPhil
    # thesis (from the robotics group at Oxford), but it's a fairly obvious
# thing to do.
size = 3
y, x = numpy.mgrid[-size:size+1, -size:size+1]
gauss = numpy.exp(-(x**2/float(size) + y**2/float(size)))
gauss /= gauss.sum()
# Calculate the x and y derivatives of a 2D gaussian with standard
# deviation half of its size.
gx = - x * numpy.exp(-(2.0*x/size)**2 - (2.0*y/size)**2)
gy = - y * numpy.exp(-(2.0*x/size)**2 - (2.0*y/size)**2)
    imx = signal.convolve (mim[:,:,0], gx, mode='same')
    imy = signal.convolve (mim[:,:,0], gy, mode='same')
Wxx = scipy.signal.convolve (imx*imx, gauss, mode='same')
Wxy = scipy.signal.convolve (imx*imy, gauss, mode='same')
Wyy = scipy.signal.convolve (imy*imy, gauss, mode='same')
harrisim = (Wxx * Wyy - Wxy**2) / (Wxx + Wyy + tiny)
# Find the top corner candidates above the threshold.
corner_threshold = max (harrisim.ravel()) * threshold
harrisim_t = (harrisim > corner_threshold) * 1
# Get the coordinates of candidate corners and their values, then sort them.
cands = harrisim_t.nonzero()
coords = [(cands[0][c], cands[1][c]) for c in xrange(len(cands[0]))]
candidate_values = [harrisim[c[0]][c[1]] for c in coords]
index = scipy.argsort(candidate_values)
# Store allowed point locations in an array then select the best points,
# taking min_distance into account.
allowed_locations = numpy.zeros(harrisim.shape)
allowed_locations[min_distance:-min_distance,min_distance:-min_distance] = 1
corners = []
for i in index:
if allowed_locations[coords[i][0]][coords[i][1]] == 1:
corners.append(coords[i])
allowed_locations[(coords[i][0] - min_distance):\
(coords[i][0] + min_distance),\
(coords[i][1] - min_distance):\
(coords[i][1] + min_distance)] = 0
return corners
#-------------------------------------------------------------------------------
def high_peaks (peaks, factor=0.5):
"""
Given a sorted list of peaks, return those within factor of the highest.
Arguments:
peaks list of peaks sorted into descending order
factor peaks of height within factor of the highest are returned
(default: 0.5)
"""
threshold = peaks[0][0] * factor
n = 0
for ht, y, x in peaks:
if ht < threshold: break
n += 1
return peaks[:n]
#-------------------------------------------------------------------------------
def histogram (im, bins=64, limits=None, disp=False):
"""
Find the histogram of an image.
Arguments:
im image for which the histogram is to be found
bins number of bins in the histogram (default: 64)
limits extrema between which the histogram is to be found
(default: calculated from the image)
disp if True, the histogram will be drawn (default: False)
"""
h, a = numpy.histogram (im, bins, limits)
if disp: graph_gnuplot (a, h, 'Histogram', 'bin', 'number of pixels',
style='histogram')
return a, h
#-------------------------------------------------------------------------------
def hough_line (im, nr=512, na=512, yc=None, xc=None, threshold=10,\
disp=False, dispacc=False):
"""
Perform the Hough transform for lines of the image 'im'.
This routine performs a straight-line Hough transform of the image 'im',
which should normally contain output from an edge detector. It returns a
list of the significant peaks found (see find_peaks for a description of
its content) and the image that forms the accumulator. The accumulator is
of dimension [na, nr], the distance from the origin (yc, xc) being plotted
along the x-direction and the corresponding angle along the y-direction.
Arguments:
im image for which the Hough transform is to be performed
nr number of radial values (x-direction of the accumulator)
na number of angle values (y-direction of the accumulator)
yc y-value of the origin on the image array (default: image centre)
xc x-value of the origin on the image array (default: image centre)
threshold minimum value for a significant peak in the accumulator
(default: 10)
      disp       if True, draw the lines found over the image (default: False)
      dispacc    if True, display the accumulator array (default: False)
"""
ny, nx, nc = sizes (im)
if yc is None: yc = ny / 2
if xc is None: xc = nx / 2
acc = image ((na, nr, 1))
ainc = math.pi / na
    rmax = math.sqrt (ny**2 + nx**2)
    rinc = nr / rmax
# Find edge points and update the Hough array.
for y in xrange (0, ny):
for x in xrange (0, nx):
v = im[y,x,0]
if v > 0:
for a in xrange (0, na):
ang = a * ainc
r = ((x - xc) * math.cos(ang) + (y - yc) * math.sin (ang))
                    # Map r, which may be negative, onto the bins 0..nr-1.
                    rbin = int ((r + rmax / 2.0) * rinc)
                    if rbin >= 0 and rbin < nr:
                        acc[a,rbin,0] += 1
# Now find peaks in the accumulator.
peaks = find_peaks (acc, threshold=threshold)
# If the user wants to display what has been found, draw the lines over
    # the image. (This implementation is ugly.)
if dispacc: display (acc)
if disp:
d = image ((ny, nx, 3))
d[:,:,0] = im[:,:,0] * 0.5
d[:,:,1] = im[:,:,0] * 0.5
d[:,:,2] = im[:,:,0] * 0.5
for h, a, r in peaks:
            da = a * ainc
            dr = r / rinc - rmax / 2.0
            for y in range (0, ny):
                for x in range (0, nx):
                    t = (x - xc) * math.cos (da) + (y - yc) * math.sin (da)
                    if abs(t - dr) < 0.5 / rinc: d[y,x,0] = max_image_value
display (d)
return peaks, acc
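# Example usage (a sketch; 'edges' is assumed to hold the output of an edge
# detector, with non-zero pixels marking edges):
#     peaks, acc = hough_line (edges, threshold=20, disp=True)
#     print_peaks (peaks, intro='Lines found (value, angle bin, radius bin):')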
#-------------------------------------------------------------------------------
def hsv_to_rgb (im):
"""
Convert an image from HSV space to RGB.
This routine converts an image in which the hue, saturation and
value components are in channels 0, 1 and 2 respectively to the RGB
colour space. It is assumed that hue lies in the range [0,359],
while saturation and value are percentages; these are compatible
with the popular display program 'xv'. The red, green and blue
components are returned in channels 0, 1 and 2 respectively, each in
the range [0,255].
This routine is adapted from code written by Frank Warmerdam
<warmerdam@pobox.com> and Trent Hare; see
http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/hsv_merge.py
Arguments:
im image to be converted (modified)
"""
h = im[:,:,0] / 360.0
s = im[:,:,1] / 100.0
v = im[:,:,2] * max_image_value / 100.0
i = (h * 6.0).astype(int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
    i = i % 6   # guard against h == 1.0 selecting a non-existent sextant
    im[:,:,0] = numpy.choose (i, (v, q, p, p, t, v))
    im[:,:,1] = numpy.choose (i, (t, v, v, q, p, p))
    im[:,:,2] = numpy.choose (i, (p, p, t, v, v, q))
#-------------------------------------------------------------------------------
def image (fromwhat, type=numpy.float32):
"""
Create an EVE image.
Arguments:
fromwhat the source from which the image is to be created, one of:
a string: the name of a file to be read in
                 a numpy array: a new zero-filled image with the same shape
a list or tuple: the dimensions (ny, nx, nc)
      type      the type of the image to be created (default: numpy.float32)
"""
if isinstance (fromwhat, str):
import Image
pic = Image.open (fromwhat)
# Something seems to be broken with at least 16-bit TIFFs...
if pic.mode == "I;16":
temp = numpy.fromstring(pic.tostring(), dtype=numpy.uint16)
im = numpy.asarray (temp, dtype=type)
nc = 1
else:
im = numpy.asarray (pic, dtype=type)
nc = len (pic.getbands ())
nx, ny = pic.size
im.shape = [ny, nx, nc]
elif isinstance (fromwhat, numpy.ndarray):
ny, nx, nc = fromwhat.shape
        im = numpy.zeros ((ny, nx, nc), dtype=type)
elif isinstance (fromwhat, list) or isinstance (fromwhat, tuple):
im = numpy.zeros (fromwhat, dtype=type)
else:
raise ValueError, 'Illegal argument type'
return im
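# Example usage (a sketch of the three creation modes):
#     im1 = image ('photo.png')      # read from a file
#     im2 = image (im1)              # new zero-filled image, same shape
#     im3 = image ((480, 640, 3))    # from explicit dimensions (ny, nx, nc)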
#-------------------------------------------------------------------------------
def insert (im, reg, yc, xc, operation='='):
"""
Insert image reg into im, centred at (yc, xc).
Arguments:
im image into which the region is to be inserted (modified)
reg image which is to be inserted
yc y-value of im at which the centre of reg is to be inserted
xc x-value of im at which the centre of reg is to be inserted
      operation  way in which reg is combined with im, one of
'=' (assign), '+' (add), '-' (subtract), '*' (multiply),
or '/' (divide)
"""
ny, nx, nc = sizes (reg)
ylo = yc - ny // 2
yhi = ylo + ny
xlo = xc - nx // 2
xhi = xlo + nx
if operation == '=': im[ylo:yhi,xlo:xhi,:] = reg
elif operation == '+': im[ylo:yhi,xlo:xhi,:] += reg
elif operation == '-': im[ylo:yhi,xlo:xhi,:] -= reg
elif operation == '*': im[ylo:yhi,xlo:xhi,:] *= reg
elif operation == '/': im[ylo:yhi,xlo:xhi,:] /= reg
else: raise ValueError, 'Invalid operation type'
#-------------------------------------------------------------------------------
def label_regions (im, con8=False):
"""
Given a segmented image, return an image with its regions labelled and
the number of regions found.
Arguments:
im image to be labelled
con8 if True, consider all 8 nearest neighbours
if False, consider only 4 nearest neighbours (default)
"""
import scipy.ndimage
if con8: ele = [[[ 1, 1, 1,], [ 1, 1, 1,], [ 1, 1, 1,]],
[[ 1, 1, 1,], [ 1, 1, 1,], [ 1, 1, 1,]],
[[ 1, 1, 1,], [ 1, 1, 1,], [ 1, 1, 1,]]]
else: ele = None
res, nlabs = scipy.ndimage.measurements.label (im, structure=ele)
return res, nlabs
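# Example usage (a sketch; 'mask' is assumed to be a binary image such as
# that returned by segment_hsv):
#     labs, nregions = label_regions (mask)
#     first = labelled_region (labs, 1)    # extract one region as an image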
#-------------------------------------------------------------------------------
def label_regions_slow (im, con8=True):
"""
Given a segmented image, return an image with its regions labelled.
Arguments:
im image to be labelled
con8 if True, consider all 8 nearest neighbours
if False, consider only 4 nearest neighbours
"""
ny, nx, nc = sizes (im)
lab = image ((ny, nx, nc), type=numpy.int32)
vals = [0, 0, 0, 0]
labs = [1, 0, 0, 0]
# The upper left pixel is in region zero.
lastlabel = 0
equiv = [lastlabel]
lab[0,0,0] = lastlabel
# Process the rest of the first row of the image.
y = 0
for x in xrange (1, nx):
if im[y,x,0] != im[y,x-1,0]:
lastlabel += 1
equiv.append (lastlabel)
lab[y,x,0] = lastlabel
# Process the first column of the image.
x = 0
for y in xrange (1, ny):
if im[y,x,0] == im[y-1,x,0]:
lv = lab[y-1,x,0]
else:
lastlabel += 1
equiv.append (lastlabel)
lv = lastlabel
lab[y,x,0] = lv
# Process the remainder of the image.
for y in xrange (1, ny):
y1 = y - 1
for x in xrange (1, nx):
if con8: nv = 4
else: nv = 2
x1 = x - 1
x2 = x + 1
if x2 >= nx - 1 and con8: lastcol = True; nv -= 1
else: lastcol = False
val = im[y,x,0]
# Get the four neighbours' values and labels, taking care not
# to index off the end of the image.
vals[0] = im[y, x1,0]; labs[0] = lab[y, x1,0]
vals[1] = im[y1,x, 0]; labs[1] = lab[y1,x, 0]
if con8:
vals[2] = im[y1,x1,0]; labs[2] = lab[y1,x1,0]
if not lastcol:
vals[3] = im[y1,x2,0]; labs[3] = lab[y1,x2,0]
inreg = False
for i in xrange (0, nv):
if val == vals[i]: inreg = True
if not inreg:
# We're in a new region.
lastlabel += 1
equiv.append (lastlabel)
lv = lastlabel
else:
# We must be in the same region as a neighbour.
matches = []
for i in xrange (0, nv):
if val == vals[i]: matches.append (labs[i])
matches.sort ()
lv = int(matches[0])
for v in matches[1:]:
if equiv[v] > lv:
equiv[v] = lv
elif lv > equiv[v]:
equiv[lv] = equiv[v]
lab[y,x,0] = lv
# Tidy up the equivalence table.
remap = list()
nc = -1
for i in xrange (0, len(equiv)):
if equiv[i] == i:
nc += 1
v = nc
else:
v = i
while equiv[v] != v:
v = equiv[v]
v = remap[v]
remap.append (v)
# Make a second pass through the image, re-labelling the regions, then
# return the labelled image.
for y in xrange (0, ny):
for x in xrange (0, nx):
lab[y,x,0] = remap[lab[y,x,0]]
return lab, max(lab)
#-------------------------------------------------------------------------------
def labelled_region (labim, lab, bg=0.0, fg=max_image_value):
"""
Return a region from a labelled image.
Arguments:
      labim   labelled image
lab the label that defines which region of the image to return
bg value to which pixels outside the region are to be set (default: 0.0)
fg value to which pixels inside the region are to be set (default: 255.0)
"""
im = image (labim)
set (im, bg)
im[numpy.where (labim == lab)] = fg
return im
#-------------------------------------------------------------------------------
def log1 (im):
"""
Add unity to an image and convert to logarithmic scale.
Arguments:
im image
"""
im = numpy.log (im + 1)
return im
#-------------------------------------------------------------------------------
def lut (im, table, stretch=False, limits=None):
"""
Use a lookup table to adjust pixel values.
Arguments:
im image to be adjusted (modified)
table look-up table used to adjust pixel values
stretch if True, the image will first be contrast-stretched
between limits
limits a two-element list containing the minimum and maximum
values to be used for scaling (default: [0, 255])
"""
ny, nx, nc = sizes (im)
ntable = len (table)
if stretch:
if limits is None:
lo = 0.0
hi = max_image_value
else:
            lo, hi = limits
contrast_stretch (im, low=lo, high=hi)
for y in xrange (0, ny):
for x in xrange (0, nx):
for c in xrange (0, nc):
v = im[y,x,c]
if v >= 0 and v < ntable: im[y,x,c] = table[int(v)]
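# Example usage (a sketch): build a 256-entry gamma-correction table for an
# image whose pixels lie in [0, 255] and apply it:
#     gamma = 2.2
#     table = [max_image_value * (i / 255.0)**(1.0 / gamma)
#              for i in xrange (256)]
#     lut (im, table)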
#-------------------------------------------------------------------------------
def mark_at_position (im, y, x, v=max_image_value, symbol='.', size=9):
"""
Plot a marker in an image.
Arguments:
im image in which the positions are to be marked (modified)
y y-position of the centre of the mark
x x-position of the centre of the mark
      v          value to which the mark's pixels will be set (default: 255)
      symbol     what is to be plotted, one of (default: '.'):
'.' a single pixel
'+' a vertical cross
'x' a diagonal cross
'o' a 3 x 3 circle
size size of the plotting symbol (default: 9)
"""
half = math.ceil (size / 2)
yy = y - half
xx = x - half
if symbol == '.':
im[y,x] = v
elif symbol == '+':
draw_line_fast (im, yy, x, yy+size+1, x, v)
draw_line_fast (im, y, xx, y, xx+size+1, v)
elif symbol == 'x':
draw_line_fast (im, yy, xx, yy+size+1, xx+size+1, v)
draw_line_fast (im, yy, xx+size+1, yy+size+1, xx, v)
elif symbol == 'o':
im[y-1,x-1] = im[y-1,x] = im[y-1,x+1] = v
im[y,x-1] = im[y,x] = im[y,x+1] = v
im[y+1,x-1] = im[y+1,x] = im[y+1,x+1] = v
else:
raise ValueError, 'Unrecognised plotting point: "' + symbol + '"'
#-------------------------------------------------------------------------------
def mark_features (im, locs, v=255, fac=1.0, fast=False, disp=True, scale=1.0):
"""
Mark the positions, sizes and orientations of features in an image.
Given a list of feature locations such as those returned by SIFT, in
which each element of the list consists of a list of y-position,
x-position, scale and orientation, this routine marks them on the
image im. Each line drawn is drawn with value val and is scaled by
the factor fac. If disp is True, the result is displayed.
Arguments:
im image in which the features are to be drawn (modified)
      locs    a list of the features to be drawn
      v       value with which each line is drawn (default: 255)
      fac     scale factor for features drawn on im (default: 1.0)
      fast    if True, lines are drawn for speed rather than appearance
              (default: False)
      disp    if True, the resulting image is displayed (default: True)
      scale   factor to multiply the image by before marking points
              (default: 1.0)
"""
im *= scale
ny, nx, nc = sizes (im)
for y, x, s, o in locs:
yy = y + fac * s * math.sin (-o)
xx = x + fac * s * math.cos (-o)
if yy < 0: yy = 0
if yy >= ny: yy = ny - 1
if xx < 0: xx = 0
if xx >= nx: xx = nx - 1
if fast: draw_line_fast (im, y, x, yy, xx, v)
else: draw_line (im, y, x, yy, xx, v)
if disp: display (im)
#-------------------------------------------------------------------------------
def mark_matches (im1, im2, loc1, loc2, scores, v=max_image_value, fast=False,
threshold=0.0, number=False, disp=True, name='Matches'):
"""
Draw lines between corresponding match points, returning the result.
Arguments:
im1 first image for which matches have been found
im2 second image for which matches have been found
loc1 feature points found in im1 from SIFT or similar
loc2 feature points found in im2 from SIFT or similar
scores score between features found by match_descriptors
v value with which the line will be drawn (default: 255)
fast if True, lines are drawn for speed rather than appearance
(default: False)
threshold if a score is above threshold, it will be drawn
(default: 0.0)
      number     if True, write each match's index in scores alongside
                 its line (default: False)
disp if True, the resulting image will be displayed (default: True)
name name passed to eve.display if the image is displayed
(default: 'Matches')
    Given im1 and im2, a new image is formed which displays them side by
side, and then lines are drawn between corresponding points (stored
in loc1 and loc2) for which the corresponding score is greater than
    threshold. The routine is intended for displaying matches between
    features found by SIFT and paired up by match_descriptors.
"""
ny1,nx1,nc1 = sizes (im1)
ny2,nx2,nc2 = sizes (im2)
ny = ny1 if ny1 > ny2 else ny2
nx = nx1 + nx2
nc = nc1 if nc1 > nc2 else nc2
dim = image ((ny,nx,nc))
dim[0:ny1,0:nx1,0:nc1] = im1
dim[0:ny2,nx1:,0:nc2] = im2
    for i in xrange (0, len(scores)):
        # Only draw matches whose score exceeds the threshold.
        if scores[i][0] <= threshold: continue
        i1 = scores[i][1]
i2 = scores[i][2]
y1 = loc1[i1,0]
x1 = loc1[i1,1]
y2 = loc2[i2,0]
x2 = loc2[i2,1] + nx1
if fast: draw_line_fast (dim, int(y1), int(x1), int(y2), int(x2), v)
else: draw_line (dim, int(y1), int(x1), int(y2), int(x2), v)
if number:
offset = 3
if x2 + character_width + offset >= nx1 + nx2:
draw_text (dim, ('%s' % i), y2, x2-offset, v, align='r')
else:
draw_text (dim, ('%s' % i), y2, x2+offset, v)
if x1 - character_width - offset <= 0:
draw_text (dim, ('%s' % i), y1, x1+offset, v)
else:
draw_text (dim, ('%s' % i), y1, x1-offset, v, align='r')
if disp: display (dim, name=name)
return dim
#-------------------------------------------------------------------------------
def mark_peaks (im, pos, v=max_image_value, disp=False, scale=1.0,
symbol='+', size=9, name='Peaks'):
"""
Mark the positions of peaks in an image, such as those returned by
find_peaks().
Arguments:
im image in which the peak positions are to be marked (modified)
pos list of peaks, each element itself a list of [height, y, x]
v value to which peak locations will be set (default: 255)
disp if True, display the marked-up image
scale multiply the image by the factor before marking points
symbol what is to be plotted, one of (default: '+'):
'.' a single pixel
'+' a vertical cross
'x' a diagonal cross
'o' a 3 x 3 blob
size size of the plotting symbol (default: 9)
name name for eve.display if the image is displayed (default: 'Peaks')
"""
im *= scale
for ht, y, x in pos:
mark_at_position (im, y, x, v, symbol, size)
if disp: display (im, name=name)
#-------------------------------------------------------------------------------
def mark_positions (im, pos, v=max_image_value, disp=False, scale=1.0,
symbol='+', size=9, name='Positions'):
"""
Mark positions in an image.
Arguments:
im image in which the positions are to be marked (modified)
pos list of positions, each element itself a list of [y, x]
v value to which peak locations will be set (default: 255)
disp if True, display the marked-up image (default: False)
scale multiply the image by the factor before marking points
symbol what is to be plotted, one of (default: '+'):
'.' a single pixel
'+' a vertical cross
'x' a diagonal cross
size size of the plotting symbol (default: 9)
name name for eve.display if the image is displayed
(default: 'Positions')
"""
im *= scale
for y, x in pos:
mark_at_position (im, y, x, v, symbol, size)
if disp: display (im, name=name)
#-------------------------------------------------------------------------------
def max (im):
"""
Return the maximum of an image.
Arguments:
im image for which the maximum value is to be found
"""
return im.max()
#-------------------------------------------------------------------------------
def match_descriptors_euclidean (desc1, desc2):
"""
Given pairs of descriptors from SIFT or similar, return the Euclidean
distances between all pairs, sorted into ascending order.
Arguments:
desc1 first set of descriptors
desc2 second set of descriptors
"""
score = []
for i1 in xrange (0, len(desc1)):
d1 = desc1[i1]
for i2 in xrange (0, len(desc2)):
d2 = desc2[i2]
s = ((d1 - d2)**2).sum()
score.append ([s, i1, i2])
score.sort()
return score
#-------------------------------------------------------------------------------
def match_descriptors (d1, d2, factor=0.6):
"""
Given pairs of normalized descriptors from SIFT or similar, return
their best matches sorted into ascending order of match score.
The match score is calculated as follows. For each descriptor in
d1, the scalar product is calculated with all descriptors in d2
    and the best value (the smallest angle between descriptors) taken.
    If that angle is greater than factor times the second-best value, the
    match is considered ambiguous and discarded; otherwise, a triplet of
    the score and the indices into d1 and d2 is inserted into a list
of scores. When all possible combinations of d1 and d2 have been
considered, that list is sorted into ascending order and returned.
Arguments:
d1 first set of descriptors
d2 second set of descriptors
factor largest permissible value for a match (default: 0.6)
"""
nd = d1.shape[0]
score = []
for i in xrange (0,nd):
inprod = numpy.dot (d1[i], d2.T)
inprod[numpy.where (inprod > 1.0)] = 1.0
inprod[numpy.where (inprod < -1.0)] = -1.0
angles = numpy.arccos (inprod)
ix = numpy.argsort (angles)
if angles[ix[0]] < factor * angles[ix[1]]:
score.append ([angles[ix[0]], i, ix[0]])
score.sort()
return score
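# Example usage (a sketch of a typical matching pipeline; sift() below
# requires the vlfeat 'sift' program to be installed):
#     locs1, descs1 = sift (im1)
#     locs2, descs2 = sift (im2)
#     scores = match_descriptors (descs1, descs2)
#     mark_matches (im1, im2, locs1, locs2, scores)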
#-------------------------------------------------------------------------------
def mean (im):
"""
Return the mean of an image.
Arguments:
im image for which the mean value is to be found
"""
ny, nx, nc = sizes (im)
return numpy.sum (im) / (ny * nx * nc)
#-------------------------------------------------------------------------------
def min (im):
"""
Return the minimum of an image.
Arguments:
im image for which the minimum value is to be found
"""
return im.min()
#-------------------------------------------------------------------------------
def modulus_squared (im):
"""
Form the squared modulus of each pixel of an image.
This routine forms the squared modulus of each pixel of an image,
usually to form the power spectrum of a Fourier transform.
Arguments:
      im    image for which the power spectrum is to be formed
"""
t = im * numpy.conj(im)
return t.real
#-------------------------------------------------------------------------------
def mono (im):
"""
Average the channels of colour image to give a monochrome one, returning
the result.
Arguments:
im image to be converted to monochrome
"""
ny, nx, nc = sizes (im)
monoim = image ([ny, nx, 1])
for c in xrange (0, nc):
monoim[:,:,0] += im[:,:,c]
monoim /= nc
return monoim
#-------------------------------------------------------------------------------
def mono_to_rgb (im):
"""
Produce a three-channel image of a monochrome image, returning the result.
Arguments:
      im    monochrome image to be replicated into three channels
"""
ny, nx, nc = sizes (im)
cim = image ([ny, nx, 3])
cim[:,:,0] = im[:,:,0]
cim[:,:,1] = im[:,:,0]
cim[:,:,2] = im[:,:,0]
return cim
#-------------------------------------------------------------------------------
def mse (im1, im2):
"""
Return the mean-squared error (mean-squared difference) between two
images.
Arguments:
      im1   image from which im2 is to be subtracted
im2 image to be subtracted from im1
"""
ny, nx, nc = sizes (im1)
return ssd (im1, im2) / float(ny * nx * nc)
#-------------------------------------------------------------------------------
def output (im, fn):
"""
Output an image to a file, the format being determined by its
extension.
Arguments:
im image to be output
fn name of the file to be written, ending in:
".jpg" for JPEG format
".png" for PNG format
".pnm" or ".pgm" or ".ppm" for PBMPLUS format
"""
extn = fn[-3:]
if extn == 'jpg': output_pil (im, fn, 'JPEG')
elif extn == 'png': output_pil (im, fn, 'PNG')
elif extn == 'bmp': output_pil (im, fn, 'BMP')
elif extn == 'pnm': output_pnm (im, fn)
elif extn == 'pgm': output_pnm (im, fn)
elif extn == 'ppm': output_pnm (im, fn)
else:
        raise ValueError, 'Unsupported file extension'
#-------------------------------------------------------------------------------
def output_bmp (im, fn):
"""
Output an image to a file in BMP format.
Arguments:
im image to be output
fn name of the file to be written
"""
output_pil (im, fn, 'BMP')
#-------------------------------------------------------------------------------
def output_jpeg (im, fn):
"""
Output an image to a file in JPEG format.
Arguments:
im image to be output
fn name of the file to be written
"""
output_pil (im, fn, 'JPEG')
#-------------------------------------------------------------------------------
def output_jpg (im, fn):
"""
Output an image to a file in JPEG format.
Arguments:
im image to be output
fn name of the file to be written
"""
output_pil (im, fn, 'JPEG')
#-------------------------------------------------------------------------------
def output_pil (im, fn, format='PNG'):
"""
Output an image to a file using PIL.
Arguments:
im image to be output
fn name of the file to be written
format the format of the file to be written (default: 'PNG')
"""
import Image
ny, nx, nc = sizes (im)
bim = im.astype ('B')
if nc == 3:
pilImage = Image.fromarray (bim, 'RGB')
elif nc == 4:
pilImage = Image.fromarray (bim, 'RGBA')
else:
pilImage = Image.fromarray (bim[:,:,0], 'L')
if format == 'display': pilImage.show ()
else: pilImage.save (fn, format)
#-------------------------------------------------------------------------------
def output_png (im, fn):
"""
Output an image to a file in PNG format.
Arguments:
im image to be output
fn name of the file to be written
"""
output_pil (im, fn, 'PNG')
#-------------------------------------------------------------------------------
def output_pnm (im, fn, binary=True, stretch=False, biggreys=False):
"""
Output an image in PBMPLUS format to a file or stdout.
Arguments:
im image to be output
fn name of the file to be written
binary if True, output binary, rather than text, data (default: True)
stretch if True, contrast-stretch the image during output (default: False)
biggreys if True, output 16-bit pixels (default: False)
"""
# First, make sure we know the range of the data we are to output and
# work out the necessary scaling factor.
    if biggreys: opmax = 65535; fmt = "%6d"
else: opmax = max_image_value; fmt = "%4d"
if stretch:
lo, hi = extrema (im)
opmin = 0
fac = opmax / (hi - lo)
# Open the file and write out the header.
ny, nx, nc = sizes (im)
if binary:
mode = "b"
if nc == 1: pbmtype = "P5"
elif nc == 3: pbmtype = "P6"
else: pbmtype = "P? (%d channels as binary)" % nc
else:
mode = ""
if nc == 1: pbmtype = "P2"
elif nc == 3: pbmtype = "P3"
else: pbmtype = "P? (%d channels as ASCII)" % nc
if fn == "-":
f = sys.stdout
else:
f = open (fn, "w" + mode)
f.write (pbmtype + "\n#CREATOR: eve.output_pnm\n%d %d\n%d\n" \
% (nx, ny, opmax))
if binary:
        temp = im
        if stretch: temp = (temp - lo) * fac
        # Clip values into the range 0 to opmax before converting to bytes.
        temp = numpy.clip (temp, 0, opmax)
        f.write (temp.astype("B"))
else:
for y in xrange (0, ny):
for x in xrange (0, nx):
for c in xrange (0, nc):
if stretch:
v = (im[y,x,c] - lo) * fac
if v > opmax: v = opmax
if v < opmin: v = opmin
else:
v = im[y,x,c]
                    f.write (fmt % v)
        f.write ("\n")
if fn != "-": f.close ()
#-------------------------------------------------------------------------------
def pca (im):
"""
Perform a principal component analysis of the channels of im,
returning the eigenvalues, kernel and mean values.
Beware: I am not yet happy that this routine is correct; in particular,
the sum of the eigenvalues does not match the sum of the variances of
the input images, which it should!
Arguments:
im image for which the PCA is to be calculated
This code is based on that written by Jan Erik Solem at
http://www.janeriksolem.net/2009/01/pca-for-images-using-python.html
"""
# Re-arrange the data and calculate the mean of all channels of each pixel.
ny, nx, nc = sizes (im)
linpix = numpy.ndarray ((nc, ny*nx))
for c in range (0, nc):
linpix[c,:] = im[:,:,c].copy().flatten()
aves = linpix.mean(axis=0)
# Mean-zero the data, form the covariance matrix, then calculate its
# eigen decomposition.
for c in range (0, nc):
linpix[c] -= aves
covmat = numpy.dot (linpix, linpix.T) / (ny * nx)
vals, vecs = numpy.linalg.eigh (covmat)
    # Perform Turk and Pentland's 'compact trick' and reverse the order, as
    # we want the eigenvectors and values in descending order.
temp = numpy.dot (linpix.T, vecs).T
vecs = temp[::-1]
vals = numpy.sqrt (vals)[::-1]
return vals, vecs, aves
#-------------------------------------------------------------------------------
def pca_channels (im):
"""
Perform a principal component analysis of the channels of im,
returning the eigenvalues, kernel and means.
Arguments:
im image for which the PCA is to be calculated (modified)
"""
ny, nx, nc = sizes (im)
covmat, ave = covariance (im)
vals, vecs = numpy.linalg.eigh (covmat)
perm = numpy.argsort(-vals) # sort in descending order of eigenvalue
vecs = vecs[:,perm].T # transpose gives transform kernel
for y in xrange (0, ny):
for x in xrange (0, nx):
v = im[y,x,:] - ave
im[y,x,:] = numpy.dot (vecs, v)
# The eigenvalues need to be normalized in order to keep the total variance
# of the transform equal to that of the input; this is not mentioned in the
# documentation of the eigen decomposition routine. The scale factor was
# found by experiment.
vals = vals[perm] / nc
return vals, vecs, ave
#-------------------------------------------------------------------------------
def print_peaks (pos, format="%4d %4d: %.2f", intro=None, fd=sys.stdout):
"""
Print a series of peaks out, one per line.
Arguments:
pos list containing the peaks to be printed out
format format for (y,x) and height to be printed out
(default: "%4d %4d: %.2f")
intro if supplied, this string is printed out before the peaks
fd file on which the output is to be written (default: sys.stdout)
"""
    if not intro is None: print >>fd, intro
for ht, y, x in pos:
print >>fd, format % (y, x, ht)
#-------------------------------------------------------------------------------
def print_positions (pos, format="%4d %4d", intro=None, fd=sys.stdout):
"""
Print a series of positions out, one per line.
Arguments:
pos list containing the positions to be printed out
format format for (y,x) to be printed out (default: "%4d %4d")
intro if supplied, this string is printed out before the positions
fd file on which the output is to be written (default: sys.stdout)
"""
    if not intro is None: print >>fd, intro
for y, x in pos:
print >>fd, format % (y, x)
#-------------------------------------------------------------------------------
def radial_profile (im, y0=None, x0=None, rlo=0.0, rhi=None, alo=-math.pi,
ahi=math.pi):
"""
Return an array of the rotational means at one-pixel radial spacings in an
annular region of an image.
Arguments:
im the image to be examined
y0 the y-value of the centre of the rotation (default: centre pixel)
x0 the x-value of the centre of the rotation (default: centre pixel)
rlo the inner radius of the annular region
rhi the outer radius of the annular region
alo the lower angle of the annular region (default: -pi)
ahi the higher angle of the annular region (default: pi)
"""
# Fill in the default values as necessary.
ny, nx, nc = sizes (im)
if y0 is None: y0 = ny / 2.0
if x0 is None: x0 = nx / 2.0
if rhi is None: rhi = math.sqrt ((nx - x0)**2 + (ny - y0)**2)
n = int (rhi + 1.0)
ave = numpy.zeros ((n))
num = numpy.zeros ((n))
# Cycle through the image.
for y in xrange (0, ny):
yy = (y - y0)**2
for x in xrange (0, nx):
r = math.sqrt (yy + (x-x0)**2)
if r <= 0.0: angle = 0.0
else: angle = -math.atan2 (y-y0, x-x0)
for c in xrange (0, nc):
if angle >= alo and angle <= ahi and r >= rlo and r <= rhi:
                    i = int (r - rlo)
ave[i] += im[y,x,c]
num[i] += 1
# Convert the sums into means.
for i in xrange (0, n):
if num[i] > 0: ave[i] /= num[i]
return ave
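# Example usage (a sketch): the rotational average of a power spectrum, a
# common way of summarising image texture:
#     ps = modulus_squared (fourier (im))
#     prof = radial_profile (ps)
#     graph (None, prof, title='Radial profile', pause=False)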
#-------------------------------------------------------------------------------
def ramp (im):
"""
Fill an image with a grey-scale ramp.
Arguments:
im image into which the pattern is written (modified)
"""
ny, nx, nc = sizes (im)
im[:,:,:] = numpy.fromfunction (lambda i, j, k: i + j + k, ((ny, nx, nc)))
#-------------------------------------------------------------------------------
def reduce (im, blocksize):
"""
Reduce the size of an image by averaging each region of
blocksize x blocksize pixels to a single pixel, returning the result.
Arguments:
im image to be reduced in size
blocksize factor by which the size of the image is to be reduced
"""
ny, nx, nc = sizes (im)
nny = ny // blocksize
nnx = nx // blocksize
nim = image ((nny, nnx, nc))
for y in range (0, nny):
ylo = y * blocksize
yhi = ylo + blocksize
for x in range (0, nnx):
xlo = x * blocksize
xhi = xlo + blocksize
for c in range (0, nc):
nim[y,x,c] = im[ylo:yhi,xlo:xhi,c].mean()
return nim
#-------------------------------------------------------------------------------
def reflect_horizontally (im):
"""
Reflect an image horizontally.
Arguments:
im image to be reflected (modified)
"""
ny, nx, nc = sizes (im)
nx2 = nx // 2
for y in xrange (0, ny):
for x in xrange (0, nx2):
t = im[y,x,:].copy()
im[y,x,:] = im[y,nx-x-1,:].copy()
im[y,nx-x-1,:] = t
#-------------------------------------------------------------------------------
def reflect_vertically (im):
"""
Reflect an image vertically.
Arguments:
im image to be reflected (modified)
"""
ny, nx, nc = sizes (im)
ny2 = ny // 2
for y in xrange (0, ny2):
for x in xrange (0, nx):
t = im[y,x,:].copy()
im[y,x,:] = im[ny-y-1,x,:].copy()
im[ny-y-1,x,:] = t
#-------------------------------------------------------------------------------
def region (im, ylo, yhi, xlo, xhi):
"""
Return a rectangular region of an image.
Arguments:
im image from which the region is to be taken
ylo lower y-value (row) of the region
yhi higher y-value (row) of the region
xlo lower x-value (column) of the region
xhi higher x-value (column) of the region
"""
return im[ylo:yhi,xlo:xhi,:]
#-------------------------------------------------------------------------------
def resize (im, nny, nnx, order=1):
"""
Return im, re-sized to be of size (nny, nnx) by interpolation.
Arguments:
im image to be re-sized
nny number of rows in the re-sized image
nnx number of columns in the re-sized image
      order   order of the interpolating function (default: 1)
"""
# The following is adapted from an example in the scipy cookbook.
import scipy.ndimage
ny, nx, nc = sizes (im)
yl, xl = numpy.mgrid[0:ny-1:nny*1j,0:nx-1:nnx*1j]
coords = scipy.array ([yl, xl])
result = image ((nny, nnx, nc))
for c in range (0, nc):
result[:,:,c] = scipy.ndimage.map_coordinates (im[:,:,c], coords,
order=order)
return result
#-------------------------------------------------------------------------------
def rgb_to_hsv (im):
"""
Convert an image from RGB space to HSV.
This routine converts an image in which the red, green and blue
components are in channels 0, 1 and 2 respectively to the HSV colour
space. The hue, saturation and value components are returned in
channels 0, 1 and 2 respectively. Hue lies in the range [0,359]
while saturation and value are percentages; these are compatible
with the popular display program 'xv'.
Arguments:
im image to be converted (modified)
This routine is adapted from code written by Frank Warmerdam
<warmerdam@pobox.com> and Trent Hare; see
http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/hsv_merge.py
"""
r = im[:,:,0]
g = im[:,:,1]
b = im[:,:,2]
maxc = numpy.maximum (r, numpy.maximum(g, b))
minc = numpy.minimum (r, numpy.minimum(g, b))
v = maxc
minc_eq_maxc = numpy.equal(minc,maxc)
# Compute the difference, but reset zeros to ones to avoid divide
# by zeros later.
ones = numpy.ones ((r.shape[0], r.shape[1]))
maxc_minus_minc = numpy.choose (minc_eq_maxc, (maxc-minc,ones))
s = (maxc - minc) / numpy.maximum (ones, maxc)
rc = (maxc - r) / maxc_minus_minc
gc = (maxc - g) / maxc_minus_minc
bc = (maxc - b) / maxc_minus_minc
maxc_is_r = numpy.equal (maxc,r)
maxc_is_g = numpy.equal (maxc,g)
maxc_is_b = numpy.equal (maxc,b)
h = numpy.zeros ((r.shape[0], r.shape[1]))
h = numpy.choose (maxc_is_b, (h, 4.0 + gc - rc))
h = numpy.choose (maxc_is_g, (h, 2.0 + rc - bc))
h = numpy.choose (maxc_is_r, (h, bc - gc))
im[:,:,0] = numpy.mod (h/6.0, 1.0) * 360.0
im[:,:,1] = s * 100.0 # to be a percentage
im[:,:,2] = v * 100.0 / max_image_value # to be a percentage
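# Example usage (a sketch): a round trip through HSV space should leave an
# RGB image essentially unchanged:
#     work = copy (im)
#     rgb_to_hsv (work)
#     hsv_to_rgb (work)
#     print mse (im, work)   # should be close to zero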
#-------------------------------------------------------------------------------
def rgb_to_mono (im):
"""
    Convert an image from RGB space to luminance (the Y of YIQ).
This routine converts an image in which the red, green and blue
components are in channels 0, 1 and 2 respectively to luminance,
assuming the standard NTSC phosphor. The result is returned in a
new image.
Arguments:
im image to be converted
"""
r = im[:,:,0]
g = im[:,:,1]
b = im[:,:,2]
ny, nx, nc = sizes (im)
lum = image ((ny, nx, 1))
lum[:,:,0] = 0.299*r + 0.587*g + 0.114*b
return lum
#-------------------------------------------------------------------------------
def rgb_to_yiq (im):
"""
Convert an image from RGB space to YIQ.
This routine converts an image in which the red, green and blue components
are in channels 0, 1 and 2 respectively to the YIQ colour space, assuming
the standard NTSC phosphor. The Y, I and Q components are returned in
channels 0, 1 and 2 respectively.
Arguments:
im image to be converted (modified)
"""
r = im[:,:,0]
g = im[:,:,1]
b = im[:,:,2]
im[:,:,0] = 0.299*r + 0.587*g + 0.114*b
im[:,:,1] = 0.596*r - 0.275*g - 0.321*b
im[:,:,2] = 0.212*r - 0.523*g + 0.311*b
#-------------------------------------------------------------------------------
def set_mean_sd (im, newmean, newsd):
"""
Rescale the image to a given mean and sd.
Arguments:
im image to be rescaled (modified)
newmean mean the image is to have after rescaling
newsd standard deviation that the image is to have after rescaling
"""
oldmean = mean (im)
oldsd = sd (im)
im -= oldmean
im /= oldsd
im *= newsd
im += newmean
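# Example usage (a sketch): photometric normalisation of two images before
# comparing them:
#     set_mean_sd (im1, 128.0, 32.0)
#     set_mean_sd (im2, 128.0, 32.0)
#     print mse (im1, im2)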
#-------------------------------------------------------------------------------
def sd (im):
"""
Return the standard deviation of an image.
Arguments:
im image for which the standard deviation is to be found
"""
return im.std (ddof=1)
#-------------------------------------------------------------------------------
def segment_hsv (im, hlo, hhi, slo, shi, vlo, vhi, ishsv=False):
"""
Return a binary mask identifying pixels that fall within a region
in HSV space.
Arguments:
im image in which regions are to be found
hlo lowest HSV hue
hhi highest HSV hue
slo lowest HSV saturation
shi highest HSV saturation
vlo lowest HSV value
vhi highest HSV value
ishsv if True, the input image contains pixels in HSV format rather
than RGB (default: False)
"""
hsvim = copy (im)
if not ishsv: rgb_to_hsv (hsvim)
ny, nx, nc = sizes (hsvim)
mask = image ((ny, nx, 1))
h = hsvim[:,:,0]
s = hsvim[:,:,1]
v = hsvim[:,:,2]
if hlo > hhi:
m = ((h < hlo) | (hhi < h)) & (slo < s) & (s < shi) & \
(vlo < v) & (v < vhi) # we span 360 degrees
else:
m = ((hlo < h) & (h < hhi)) & (slo < s) & (s < shi) & \
(vlo < v) & (v < vhi)
mask[numpy.where (m)] = max_image_value
return mask
#-------------------------------------------------------------------------------
def select_matches (scores, locs1, locs2, max_score_factor=5, max_matches=50):
"""Choose the matches with the best scores.
Arguments:
scores list of scores from match_descriptors
locs1 locations of features found on the first image
locs2 locations of features found on the second image
max_score_factor ratio of the worst match to the best (default: 5)
max_matches maximum number of matches to return (default: 50)
"""
n = len(scores)
matches = []
thresh = scores[0][0] * max_score_factor
for i in range (0, n):
if scores[i][0] <= thresh:
i1 = scores[i][1] # first image
i2 = scores[i][2] # second image
y1 = locs1[i1,0]
x1 = locs1[i1,1]
y2 = locs2[i2,0]
x2 = locs2[i2,1]
matches.append ([y1, x1, y2, x2])
if len (matches) >= max_matches: break
else:
break
return matches
#-------------------------------------------------------------------------------
def set (im, v):
"""
Set all the pixels of an image to a value.
Arguments:
im image to be set (modified)
v value to which the pixels are to be set
"""
im[:,:,:] = v
#-------------------------------------------------------------------------------
def set_channel (im, c, ch):
"""
Set a channel of an image.
Arguments:
im image in which the channel is to be inserted (modified)
c number of the channel which is to be set
ch single-channel image which is to be inserted
"""
im[:,:,c] = ch[:,:,0]
#-------------------------------------------------------------------------------
def set_region (im, yfrom, xfrom, yto, xto, v):
"""
Set a region of an image to a constant value.
Arguments:
im image in which the region is to be set (modified)
      yfrom   lower y-value (row) of the region
      xfrom   lower x-value (column) of the region
      yto     higher y-value (row) of the region
      xto     higher x-value (column) of the region
v value to which the region is to be set
"""
im[yfrom:yto,xfrom:xto,:] = v
#-------------------------------------------------------------------------------
def sift (im, program='sift %i -o %o 1> /dev/null'):
"""
Run the SIFT program on an image and return the corresponding keypoints.
Two arrays are returned, the first giving the locations (and
corresponding scales and orientations) of the feature points found,
while the second contains the associated descriptors.
The particular implementation of SIFT used is from
http://www.vlfeat.org/, which has the advantages that it is open
source, available on all major platforms, and its coordinate system
matches that used by EVE. However, the values are not identical to
those returned by Lowe's own SIFT; see the abovementioned website
for details.
Arguments:
im the image for which the keypoints are to be found
program if supplied, the pathname of the SIFT program
(default: 'sift %i -o %o 1> /dev/null')
"""
kptfn, kptfd = sift_run (im, program)
features = sift_keypoints (kptfn)
os.close (kptfd)
return features
#-------------------------------------------------------------------------------
def sift_keypoints (fn):
"""
Return the SIFT keypoints of an image.
Two arrays are returned, the first giving the locations (and
corresponding scales and orientations) of the feature points found,
while the second contains the associated descriptors.
Arguments:
fn name of a file containing the SIFT keypoints
"""
import scipy.linalg
# Read in the keypoints from the file. We read the entire file into memory,
# where it ends up as a list with one line of the file in each element.
# Each line contains exactly 132 elements which give the position and
# orientation of the feature and its descriptor; we split these out into
# the arrays called locs and descs, normalising the latter en route. We
# ultimately return locs and descs.
fd = open (fn)
lines = fd.readlines()
fd.close()
lf = 128 # length of each descriptor
nf = len (lines) # number of features
if nf == 0: return None, None
locs = numpy.zeros ((nf, 4))
descs = numpy.zeros ((nf, lf))
for f in xrange (0, nf):
v = lines[f].split()
p = 0
# row, col, scale, orientation
locs[f,1] = float (v[p])
locs[f,0] = float (v[p+1])
locs[f,2] = float (v[p+2])
locs[f,3] = float (v[p+3])
p += 4
for i in xrange (0, lf):
descs[f,i] = float (v[p+i])
descs[f] = descs[f] / scipy.linalg.norm (descs[f])
return locs, descs
#-------------------------------------------------------------------------------
def sift_run (im, program='sift %i -o %o 1> /dev/null'):
"""
Run the SIFT program on an image and return name of the file
containing the corresponding keypoints.
Arguments:
im the image for which the keypoints are to be found
program if supplied, the pathname of the SIFT program
(default: 'sift %i -o %o 1> /dev/null')
"""
ny, nx, nc = sizes (im)
if nc == 1: im1 = im
else: im1 = mono (im)
# Save the image to a temporary file and run SIFT on it, gathering the
# resulting keypoints into a separate temporary file.
infd, infn = tempfile.mkstemp (".pgm")
kptfd, kptfn = tempfile.mkstemp (".sift")
output_pnm (im1, infn)
cmd = re.sub ('%i', infn, program)
cmd = re.sub ('%o', kptfn, cmd)
os.system (cmd)
os.close (infd)
return kptfn, kptfd
#-------------------------------------------------------------------------------
def susan (im, program='susan %i %o -c 1> /dev/null'):
"""
Process an image using the SUSAN feature point detector.
Arguments:
im the image for which the keypoints are to be found
program if supplied, the pathname of the SUSAN program
(default: 'susan %i %o -c 1> /dev/null')
"""
# SUSAN requires a single-channel image.
ny, nx, nc = sizes (im)
if nc == 1: im1 = im
else: im1 = mono (im)
# Save the image to a temporary file, run SUSAN on it, and read in the
# result, which we return.
infd, infn = tempfile.mkstemp (".pgm")
opfd, opfn = tempfile.mkstemp (".pgm")
output_pnm (im1, infn)
cmd = re.sub ('%i', infn, program)
cmd = re.sub ('%o', opfn, cmd)
os.system (cmd)
os.close (infd)
os.close (opfd)
susim = image (opfn)
return susim
#-------------------------------------------------------------------------------
def sizes (im):
"""
Return the dimensions of an image as a list.
Arguments:
im the image whose dimensions are to be returned
"""
return im.shape
#-------------------------------------------------------------------------------
def snr (im1, im2):
"""
Return the signal-to-noise ratio between two images.
Arguments:
im1 first image to be used in calculating the SNR
im2 second image to be used in calculating the SNR
"""
r = correlation_coefficient (im1, im2)
if r <= 0.0: return 0.0
return math.sqrt (r / (1.0 - r))
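# For example, a correlation coefficient of r = 0.5 gives
# snr = sqrt (0.5 / 0.5) = 1.0 (equal signal and noise power), while
# r = 0.99 gives snr = sqrt (0.99 / 0.01), roughly 9.95.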
#-------------------------------------------------------------------------------
def sobel (im):
"""
Perform edge detection in im using the Sobel operator, returning the
result.
Arguments:
im image in which the edges are to be found
"""
import scipy.ndimage as ndimage
# Convert the EVE-format image into one compatible with scipy, run its
# Sobel routine, then convert the result back into EVE format and return it.
ny, nx, nc = sizes (im)
if nc == 1: sci_im = im[:,:,0]
else: sci_im = mono(im)[:,:,0]
grad_x = ndimage.sobel(sci_im, 0)
grad_y = ndimage.sobel(sci_im, 1)
grad_mag = numpy.sqrt(grad_x**2 + grad_y**2)
gm = image ((ny,nx,1))
gm[:,:,0] = grad_mag[:,:]
return gm
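# Usage sketch: compute an edge map and inspect its range, since gradient
# magnitudes are not clipped to max_image_value.
#   edges = sobel (im)
#   lo, hi, ave, sdev = statistics (edges)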
#-------------------------------------------------------------------------------
def ssd (im1, im2):
"""
Return the sum-squared difference between two images.
Arguments:
im1 image from which im2 is to be subtracted
im2 image to be subtracted from im1
"""
return ((im1 - im2)**2).sum()
#-------------------------------------------------------------------------------
def statistics (im):
"""
Return important statistics of an image.
This routine returns the minimum, maximum, mean and standard deviation
as a list.
Arguments:
im image for which the statistics are to be calculated
"""
lo, hi = extrema (im)
ave = mean (im)
sdev = sd (im)
return [lo, hi, ave, sdev]
#-------------------------------------------------------------------------------
def subsample (im, inc=2):
"""
Sub-sample an image by selecting every inc-th pixel from every inc-th line.
The sub-sampled image is returned.
Arguments:
im image to be sub-sampled
inc the number of pixels between sub-samples (default: 2)
"""
ny, nx, nc = sizes (im)
ny2 = ny // inc
nx2 = nx // inc
im2 = image ((ny2, nx2, nc))
for y in xrange (0, ny2):
for x in xrange (0, nx2):
im2[y,x,:] = im[inc*y,inc*x,:]
return im2
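# Note: since EVE images are numpy arrays, the loop above is equivalent to
# the slicing expression im[0:ny2*inc:inc, 0:nx2*inc:inc, :], which avoids
# the explicit Python loops on large images.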
#-------------------------------------------------------------------------------
def sum (im):
"""
Return the sum of all the values of an image.
Arguments:
im image for which the sum is to be found
"""
return im.sum()
#-------------------------------------------------------------------------------
def thong (im, scale=64.0, offset=128.0):
"""
Fill an image with Tran Thong's zone-plate-like test pattern.
Arguments:
im image to contain the pattern (modified)
scale maximum deviation of the pattern from the mean
offset mean of the resulting pattern
"""
# Work out the centre of the region and the various fiddle factors.
ny, nx, nc = sizes (im)
xc = nx // 2
yc = ny // 2
nmax = ny
if nx > ny: nmax = nx
rad = 0.4 * nmax
rad2 = rad / 2.0
radsqd = rad * rad
radsq4 = radsqd / 4.0
fac = 2 * math.pi * 0.496
# Fill the region with the pattern.
for y in xrange (0, ny):
yy = (y - yc) **2
for x in xrange (0, nx):
rsqd = (x - xc) **2 + yy
if rsqd <= radsq4:
v = scale * math.cos (fac*rsqd/rad) + offset
for c in xrange (0, nc):
im[y,x,c] = v
else:
r = math.sqrt (rsqd)
v = scale * math.cos (fac * (2*r - rsqd/rad - rad2)) + offset
im[y,x,:] = v
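# Usage sketch: generate and save a 256 x 256 single-channel zone plate.
#   zp = image ((256, 256, 1))
#   thong (zp)
#   output_pnm (zp, 'zoneplate.pgm')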
#-------------------------------------------------------------------------------
def transpose (im):
"""
Transpose an image, returning the result.
Arguments:
im image to be transposed
"""
return numpy.transpose (im, axes=(1, 0, 2))
#-------------------------------------------------------------------------------
def variance (im):
"""
Return the variance of an image.
Arguments:
im image for which the variance is to be found
"""
return im.var (ddof=1)
#-------------------------------------------------------------------------------
def version ():
"""
Return the version of the Easy Vision Environment.
"""
return timestamp[13:-1]
#-------------------------------------------------------------------------------
def version_info (prefix=" ", intro="Modules:"):
"""
Return a string containing version information.
Arguments:
prefix text to precede each line of text (default: ' ')
intro text to precede the output (default: 'Modules:')
"""
import Image
fmt = "%s%-9s %s\n" * 6
s = intro + "\n" + fmt % (prefix, "EVE:", version(),
prefix, "numpy:", numpy.__version__,
prefix, "Image:", Image.VERSION,
prefix, "Python:", platform.python_version (),
prefix, "Compiler:", platform.python_compiler (),
prefix, "Build:", platform.python_build ()
)
return s
#-------------------------------------------------------------------------------
def zero (im):
"""
Set all pixels of an image to zero.
Arguments:
im image to be zeroed (modified)
"""
set (im, 0.0)
#-------------------------------------------------------------------------------
# Main program
#-------------------------------------------------------------------------------
if __name__ == "__main__":
print "There is a separate test script to check that EVE works correctly"
print "on your platform, available from:\n"
print " http://vase.essex.ac/uk/software/eve/\n"
timestamp = "Time-stamp: <2015-03-10 09:20:56 Adrian F Clark (alien@essex.ac.uk)>"
# Local Variables:
# time-stamp-line-limit: -10
# End:
#-------------------------------------------------------------------------------
# End of EVE
#-------------------------------------------------------------------------------
|
betoesquivel/ComputerVisionLab1
|
eve.py
|
Python
|
mit
| 143,522
|
[
"Gaussian"
] |
3e57f368656bc7adbc75f135c1161448c1b3a276b28e3c4772e3c8f53fbbb002
|
import psi4
import forte
def psi4_scf(geom, basis, reference, functional='hf', options={}) -> (float, psi4.core.Wavefunction):
"""Run a psi4 scf computation and return the energy and the Wavefunction object
Parameters
----------
geom : str
The molecular geometry (in xyz or zmat)
basis : str
The computational basis set
reference : str
The type of reference (rhf, uhf, rohf)
functional : str
The functional type for DFT (default = HF exchange)
Returns
-------
tuple(double, psi4::Wavefunction)
a tuple containing the energy and the Wavefunction object
"""
# clean psi4
psi4.core.clean()
# build the molecule object
mol = psi4.geometry(geom)
# add basis/reference/scf_type to options passed by the user
default_options = {'SCF_TYPE': 'PK', 'E_CONVERGENCE': 1.0e-11, 'D_CONVERGENCE': 1.0e-6}
# capitalize the options
options = {k.upper(): v for k, v in options.items()}
default_options = {k.upper(): v for k, v in default_options.items()}
# merge the two dictionaries. The user-provided options will overwrite the default ones
merged_options = {**default_options, **options}
# add the mandatory arguments
merged_options['BASIS'] = basis
merged_options['REFERENCE'] = reference
psi4.set_options(merged_options)
# pipe output to the file output.dat
psi4.core.set_output_file('output.dat', True)
# run the SCF and return the energy and a Wavefunction object (the wavefunction is returned only when return_wfn=True is passed)
E_scf, wfn = psi4.energy(functional, molecule=mol, return_wfn=True)
return (E_scf, wfn)
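# A minimal usage sketch (the geometry and basis are illustrative only):
#   geom = """
#   O
#   H 1 0.96
#   H 1 0.96 2 104.5
#   """
#   E_scf, wfn = psi4_scf(geom, basis='sto-3g', reference='rhf')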
def psi4_casscf(geom, basis, reference, restricted_docc, active, options={}) -> (float, psi4.core.Wavefunction):
"""Run a psi4 casscf computation and return the energy and the Wavefunction object
Parameters
----------
geom : str
The molecular geometry (in xyz or zmat)
basis : str
The computational basis set
reference : str
The type of reference (rhf, uhf, rohf)
restricted_docc : list(int)
The number of restricted doubly occupied orbitals per irrep
active : list(int)
The number of active orbitals per irrep
Returns
-------
tuple(double, psi4::Wavefunction)
a tuple containing the energy and the Wavefunction object
"""
# build the molecule object
mol = psi4.geometry(geom)
# add basis/reference/scf_type to options passed by the user
default_options = {'SCF_TYPE': 'pk', 'E_CONVERGENCE': 1.0e-10, 'D_CONVERGENCE': 1.0e-6}
# capitalize the options
options = {k.upper(): v for k, v in options.items()}
default_options = {k.upper(): v for k, v in default_options.items()}
# merge the two dictionaries. The user-provided options will overwrite the default ones
merged_options = {**default_options, **options}
# add the mandatory arguments
merged_options['BASIS'] = basis
merged_options['REFERENCE'] = reference
merged_options['RESTRICTED_DOCC'] = restricted_docc
merged_options['ACTIVE'] = active
merged_options['MCSCF_MAXITER'] = 100
merged_options['MCSCF_E_CONVERGENCE'] = 1.0e-10
merged_options['MCSCF_R_CONVERGENCE'] = 1.0e-6
merged_options['MCSCF_DIIS_START'] = 20
psi4.set_options(merged_options)
# pipe output to the file output.dat
psi4.core.set_output_file('output.dat', True)
# psi4.core.clean()
# run the CASSCF and return the energy and a Wavefunction object (the wavefunction is returned only when return_wfn=True is passed)
E_scf, wfn = psi4.energy('casscf', molecule=mol, return_wfn=True)
return (E_scf, wfn)
def psi4_casscf(geom, basis, mo_spaces):
"""
Run a Psi4 SCF.
:param geom: a string for molecular geometry
:param basis: a string for basis set
:param reference: a string for the type of reference
:return: a tuple of (scf energy, psi4 Wavefunction)
"""
psi4.core.clean()
mol = psi4.geometry(geom)
psi4.set_options(
{
'basis': basis,
'scf_type': 'pk',
'e_convergence': 1e-13,
'd_convergence': 1e-6,
'restricted_docc': mo_spaces['RESTRICTED_DOCC'],
'active': mo_spaces['ACTIVE'],
'mcscf_maxiter': 100,
'mcscf_e_convergence': 1.0e-11,
'mcscf_r_convergence': 1.0e-6,
'mcscf_diis_start': 20
}
)
psi4.core.set_output_file('output.dat', False)
Escf, wfn = psi4.energy('casscf', return_wfn=True)
psi4.core.clean()
return Escf, wfn
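# Usage sketch (the orbital-space sizes per irrep are hypothetical):
#   mo_spaces = {'RESTRICTED_DOCC': [1, 0, 0, 0], 'ACTIVE': [2, 0, 1, 1]}
#   E_casscf, wfn = psi4_casscf(geom, 'cc-pvdz', mo_spaces)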
def psi4_cubeprop(wfn, path='.', orbs=[], nocc=0, nvir=0, density=False, frontier_orbitals=False, load=False):
"""
Run a psi4 cubeprop computation to generate cube files from a given Wavefunction object
By default this function plots from the HOMO -2 to the LUMO + 2
Parameters
----------
wfn : psi4Wavefunction
A psi4 Wavefunction object
path : str
The path of the directory that will contain the cube files
orbs : list or string
The list of orbitals to convert to cube files (one based).
nocc : int
The number of occupied orbitals
nvir : int
The number of virtual orbitals
"""
import os.path
cubeprop_tasks = []
if isinstance(orbs, str):
if (orbs == 'frontier_orbitals'):
cubeprop_tasks.append('FRONTIER_ORBITALS')
else:
cubeprop_tasks.append('ORBITALS')
if nocc + nvir > 0:
na = wfn.nalpha()
nmo = wfn.nmo()
min_orb = max(1, na + 1 - nocc)
max_orb = min(nmo, na + nvir)
orbs = [k for k in range(min_orb, max_orb + 1)]
print(f'Preparing cube files for orbitals: {", ".join([str(orb) for orb in orbs])}')
if density:
cubeprop_tasks.append('DENSITY')
if not os.path.exists(path):
os.makedirs(path)
psi4.set_options({'CUBEPROP_TASKS': cubeprop_tasks, 'CUBEPROP_ORBITALS': orbs, 'CUBEPROP_FILEPATH': path})
psi4.cubeprop(wfn)
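# Usage sketch: cube files for the orbitals around the Fermi level of a
# converged wavefunction (here HOMO-1 through LUMO+1).
#   psi4_cubeprop(wfn, path='./cubes', nocc=2, nvir=2)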
def prepare_forte_objects(
wfn, mo_spaces=None, active_space='ACTIVE', core_spaces=['RESTRICTED_DOCC'], localize=False, localize_spaces=[]
):
"""Take a psi4 wavefunction object and prepare the ForteIntegrals, SCFInfo, and MOSpaceInfo objects
Parameters
----------
wfn : psi4 Wavefunction
A psi4 Wavefunction object
mo_spaces : dict
A dictionary with the size of each space (e.g., {'ACTIVE' : [3]})
active_space : str
The MO space treated as active (default: 'ACTIVE')
core_spaces : list(str)
The MO spaces treated as core (default: ['RESTRICTED_DOCC'])
localize : bool
Whether to localize the orbitals (default: False)
localize_spaces : list(str)
A list of spaces to localize (default: [])
Returns
-------
dict(ForteIntegrals, ActiveSpaceIntegrals, SCFInfo, MOSpaceInfo, map(StateInfo : list))
a dictionary containing the ForteIntegrals, SCFInfo, and MOSpaceInfo objects and a map of states and weights
"""
# fill in the options object
options = forte.forte_options
if ('DF' in options.get_str('INT_TYPE')):
aux_basis = psi4.core.BasisSet.build(
wfn.molecule(), 'DF_BASIS_MP2', psi4.core.get_global_option('DF_BASIS_MP2'), 'RIFIT',
psi4.core.get_global_option('BASIS')
)
wfn.set_basisset('DF_BASIS_MP2', aux_basis)
if (options.get_str('MINAO_BASIS')):
minao_basis = psi4.core.BasisSet.build(wfn.molecule(), 'MINAO_BASIS', options.get_str('MINAO_BASIS'))
wfn.set_basisset('MINAO_BASIS', minao_basis)
# Prepare base objects
scf_info = forte.SCFInfo(wfn)
# Grab the number of MOs per irrep
nmopi = wfn.nmopi()
# Grab the point group symbol (e.g. "C2V")
point_group = wfn.molecule().point_group().symbol()
# create a MOSpaceInfo object
if mo_spaces is None:
mo_space_info = forte.make_mo_space_info(nmopi, point_group, options)
else:
mo_space_info = forte.make_mo_space_info_from_map(nmopi, point_group, mo_spaces, [])
state_weights_map = forte.make_state_weights_map(options, mo_space_info)
# make a ForteIntegral object
ints = forte.make_ints_from_psi4(wfn, options, mo_space_info)
if localize:
localizer = forte.Localize(forte.forte_options, ints, mo_space_info)
localizer.set_orbital_space(localize_spaces)
localizer.compute_transformation()
Ua = localizer.get_Ua()
ints.rotate_orbitals(Ua, Ua)
# build the active space integrals; active_space selects the orbitals
# treated as active, while core_spaces names the doubly occupied spaces
# folded into the scalar (core) energy
as_ints = forte.make_active_space_ints(mo_space_info, ints, active_space, core_spaces)
return {
'ints': ints,
'as_ints': as_ints,
'scf_info': scf_info,
'mo_space_info': mo_space_info,
'state_weights_map': state_weights_map
}
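# Usage sketch (the space sizes are hypothetical): build the Forte objects
# and unpack what is needed downstream.
#   forte_objects = prepare_forte_objects(wfn, mo_spaces={'ACTIVE': [2, 0, 1, 1]})
#   ints = forte_objects['ints']
#   as_ints = forte_objects['as_ints']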
def prepare_ints_rdms(wfn, mo_spaces, rdm_level=3):
"""
Preparation step for DSRG: compute a CAS and its RDMs.
:param wfn: reference wave function from psi4
:param mo_spaces: a dictionary {mo_space: occupation}, e.g., {'ACTIVE': [0,0,0,0]}
:param rdm_level: max RDM to be computed
:return: a dictionary with keys 'reference_energy', 'mo_space_info', 'ints', and 'rdms'
"""
forte_objects = prepare_forte_objects(wfn, mo_spaces)
ints = forte_objects['ints']
as_ints = forte_objects['as_ints']
scf_info = forte_objects['scf_info']
mo_space_info = forte_objects['mo_space_info']
state_weights_map = forte_objects['state_weights_map']
# state_weights_map is a map {StateInfo: a list of weights} used for multi-state computations
# converts {StateInfo: weights} to {StateInfo: nroots}
state_map = forte.to_state_nroots_map(state_weights_map)
# create an active space solver object and compute the energy
as_solver_type = 'FCI'
as_solver = forte.make_active_space_solver(
as_solver_type, state_map, scf_info, mo_space_info, as_ints, forte.forte_options
)
state_energies_list = as_solver.compute_energy() # a map {StateInfo: a list of energies}
# compute averaged energy --- reference energy for DSRG
Eref = forte.compute_average_state_energy(state_energies_list, state_weights_map)
# compute RDMs
rdms = as_solver.compute_average_rdms(state_weights_map, rdm_level)
# semicanonicalize orbitals
semi = forte.SemiCanonical(mo_space_info, ints, forte.forte_options)
semi.semicanonicalize(rdms, rdm_level)
return {'reference_energy': Eref, 'mo_space_info': mo_space_info, 'ints': ints, 'rdms': rdms}
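# Putting the helpers together (a sketch): run an SCF, then prepare the
# integrals and RDMs that a DSRG computation would consume.
#   E_scf, wfn = psi4_scf(geom, 'cc-pvdz', 'rhf')
#   res = prepare_ints_rdms(wfn, {'ACTIVE': [2, 0, 1, 1]}, rdm_level=3)
#   print(res['reference_energy'])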
|
evangelistalab/forte
|
forte/utils/helpers.py
|
Python
|
lgpl-3.0
| 10,615
|
[
"Psi4"
] |
663c1080ba6ce48c54796a0ae679fe58c96348ae23e6ebb6226db4452b8721cb
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from io import StringIO
import pytest
import os
import warnings
import re
import textwrap
from unittest.mock import Mock, patch
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose)
from itertools import combinations_with_replacement as comb_wr
import MDAnalysis as mda
import MDAnalysis.lib.util as util
import MDAnalysis.lib.mdamath as mdamath
from MDAnalysis.lib.util import (cached, static_variables, warn_if_not_unique,
check_coords)
from MDAnalysis.core.topologyattrs import Bonds
from MDAnalysis.exceptions import NoDataError, DuplicateWarning
from MDAnalysisTests.datafiles import (
Make_Whole, TPR, GRO, fullerene, two_water_gro,
)
def test_absence_cutil():
with patch.dict('sys.modules', {'MDAnalysis.lib._cutil':None}):
import importlib
with pytest.raises(ImportError):
importlib.reload(sys.modules['MDAnalysis.lib.util'])
def test_presence_cutil():
mock = Mock()
with patch.dict('sys.modules', {'MDAnalysis.lib._cutil':mock}):
try:
import MDAnalysis.lib._cutil
except ImportError:
pytest.fail(msg='''MDAnalysis.lib._cutil should not raise
an ImportError if cutil is available.''')
def convert_aa_code_long_data():
aa = [
('H',
('HIS', 'HISA', 'HISB', 'HSE', 'HSD', 'HIS1', 'HIS2', 'HIE', 'HID')),
('K', ('LYS', 'LYSH', 'LYN')),
('A', ('ALA',)),
('D', ('ASP', 'ASPH', 'ASH')),
('E', ('GLU', 'GLUH', 'GLH')),
('N', ('ASN',)),
('Q', ('GLN',)),
('C', ('CYS', 'CYSH', 'CYS1', 'CYS2')),
]
for resname1, strings in aa:
for resname3 in strings:
yield (resname3, resname1)
class TestStringFunctions(object):
# (1-letter, (canonical 3 letter, other 3/4 letter, ....))
aa = [
('H',
('HIS', 'HISA', 'HISB', 'HSE', 'HSD', 'HIS1', 'HIS2', 'HIE', 'HID')),
('K', ('LYS', 'LYSH', 'LYN')),
('A', ('ALA',)),
('D', ('ASP', 'ASPH', 'ASH')),
('E', ('GLU', 'GLUH', 'GLH')),
('N', ('ASN',)),
('Q', ('GLN',)),
('C', ('CYS', 'CYSH', 'CYS1', 'CYS2')),
]
residues = [
("LYS300:HZ1", ("LYS", 300, "HZ1")),
("K300:HZ1", ("LYS", 300, "HZ1")),
("K300", ("LYS", 300, None)),
("LYS 300:HZ1", ("LYS", 300, "HZ1")),
("M1:CA", ("MET", 1, "CA")),
]
@pytest.mark.parametrize('rstring, residue', residues)
def test_parse_residue(self, rstring, residue):
assert util.parse_residue(rstring) == residue
def test_parse_residue_ValueError(self):
with pytest.raises(ValueError):
util.parse_residue('ZZZ')
@pytest.mark.parametrize('resname3, resname1', convert_aa_code_long_data())
def test_convert_aa_3to1(self, resname3, resname1):
assert util.convert_aa_code(resname3) == resname1
@pytest.mark.parametrize('resname1, strings', aa)
def test_convert_aa_1to3(self, resname1, strings):
assert util.convert_aa_code(resname1) == strings[0]
@pytest.mark.parametrize('x', (
'XYZXYZ',
'£'
))
def test_ValueError(self, x):
with pytest.raises(ValueError):
util.convert_aa_code(x)
def test_greedy_splitext(inp="foo/bar/boing.2.pdb.bz2",
ref=["foo/bar/boing", ".2.pdb.bz2"]):
inp = os.path.normpath(inp)
ref[0] = os.path.normpath(ref[0])
ref[1] = os.path.normpath(ref[1])
root, ext = util.greedy_splitext(inp)
assert root == ref[0], "root incorrect"
assert ext == ref[1], "extension incorrect"
@pytest.mark.parametrize('iterable, value', [
([1, 2, 3], True),
([], True),
((1, 2, 3), True),
((), True),
(range(3), True),
(np.array([1, 2, 3]), True),
(123, False),
("byte string", False),
(u"unicode string", False)
])
def test_iterable(iterable, value):
assert util.iterable(iterable) == value
class TestFilename(object):
root = "foo"
filename = "foo.psf"
ext = "pdb"
filename2 = "foo.pdb"
@pytest.mark.parametrize('name, ext, keep, actual_name', [
(filename, None, False, filename),
(filename, ext, False, filename2),
(filename, ext, True, filename),
(root, ext, False, filename2),
(root, ext, True, filename2)
])
def test_string(self, name, ext, keep, actual_name):
file_name = util.filename(name, ext, keep)
assert file_name == actual_name
def test_named_stream(self):
ns = util.NamedStream(StringIO(), self.filename)
fn = util.filename(ns, ext=self.ext)
# assert_equal replace by this if loop to avoid segfault on some systems
if fn != ns:
pytest.fail("fn and ns are different")
assert str(fn) == self.filename2
assert ns.name == self.filename2
class TestGeometryFunctions(object):
e1, e2, e3 = np.eye(3)
a = np.array([np.cos(np.pi / 3), np.sin(np.pi / 3), 0])
null = np.zeros(3)
@pytest.mark.parametrize('x_axis, y_axis, value', [
# Unit vectors
(e1, e2, np.pi / 2),
(e1, a, np.pi / 3),
# Angle vectors
(2 * e1, e2, np.pi / 2),
(-2 * e1, e2, np.pi - np.pi / 2),
(23.3 * e1, a, np.pi / 3),
# Null vector
(e1, null, np.nan),
# Collinear
(a, a, 0.0)
])
def test_vectors(self, x_axis, y_axis, value):
assert_allclose(mdamath.angle(x_axis, y_axis), value)
@pytest.mark.parametrize('x_axis, y_axis, value', [
(-2.3456e7 * e1, 3.4567e-6 * e1, np.pi),
(2.3456e7 * e1, 3.4567e-6 * e1, 0.0)
])
def test_angle_pi(self, x_axis, y_axis, value):
assert_almost_equal(mdamath.angle(x_axis, y_axis), value)
@pytest.mark.parametrize('x', np.linspace(0, np.pi, 20))
def test_angle_range(self, x):
r = 1000.
v = r * np.array([np.cos(x), np.sin(x), 0])
assert_almost_equal(mdamath.angle(self.e1, v), x, 6)
@pytest.mark.parametrize('vector, value', [
(e3, 1),
(a, np.linalg.norm(a)),
(null, 0.0)
])
def test_norm(self, vector, value):
assert mdamath.norm(vector) == value
@pytest.mark.parametrize('x', np.linspace(0, np.pi, 20))
def test_norm_range(self, x):
r = 1000.
v = r * np.array([np.cos(x), np.sin(x), 0])
assert_almost_equal(mdamath.norm(v), r, 6)
@pytest.mark.parametrize('vec1, vec2, value', [
(e1, e2, e3),
(e1, null, 0.0)
])
def test_normal(self, vec1, vec2, value):
assert_allclose(mdamath.normal(vec1, vec2), value)
# add more non-trivial tests
def test_angle_lower_clip(self):
a = np.array([0.1, 0, 0.2])
x = np.dot(a**0.5, -(a**0.5)) / \
(mdamath.norm(a**0.5) * mdamath.norm(-(a**0.5)))
assert x < -1.0
assert mdamath.angle(a, -(a)) == np.pi
assert mdamath.angle(a**0.5, -(a**0.5)) == np.pi
def test_stp(self):
assert mdamath.stp(self.e1, self.e2, self.e3) == 1.0
# add more non-trivial tests
def test_dihedral(self):
ab = self.e1
bc = ab + self.e2
cd = bc + self.e3
assert_almost_equal(mdamath.dihedral(ab, bc, cd), -np.pi / 2)
def test_pdot(self):
arr = np.random.rand(4, 3)
matrix_dot = mdamath.pdot(arr, arr)
list_dot = [np.dot(a, a) for a in arr]
assert_almost_equal(matrix_dot, list_dot)
def test_pnorm(self):
arr = np.random.rand(4, 3)
matrix_norm = mdamath.pnorm(arr)
list_norm = [np.linalg.norm(a) for a in arr]
assert_almost_equal(matrix_norm, list_norm)
class TestMatrixOperations(object):
def ref_trivecs(self, box):
box = np.asarray(box, dtype=np.float64)
x, y, z, a, b, c = box
# Only positive edge lengths and angles in (0, 180) are allowed:
if np.any(box <= 0) or a >= 180 or b >= 180 or c >= 180:
ref = np.zeros((3, 3), dtype=np.float32)
# detect orthogonal boxes:
elif a == 90 and b == 90 and c == 90:
ref = np.diag(box[:3].astype(np.float32))
else:
ref = np.zeros((3, 3), dtype=np.float64)
cos_a = 0.0 if a == 90 else np.cos(np.deg2rad(a))
cos_b = 0.0 if b == 90 else np.cos(np.deg2rad(b))
cos_c = 0.0 if c == 90 else np.cos(np.deg2rad(c))
sin_c = 1.0 if c == 90 else np.sin(np.deg2rad(c))
ref[0, 0] = x
ref[1, 0] = y * cos_c
ref[1, 1] = y * sin_c
ref[2, 0] = z * cos_b
ref[2, 1] = z * (cos_a - cos_b * cos_c) / sin_c
ref[2, 2] = np.sqrt(z * z - ref[2, 0] ** 2 - ref[2, 1] ** 2)
if ref[2, 2] == 0 or np.isnan(ref[2, 2]):
ref[:, :] = 0.0
ref = ref.astype(np.float32)
return ref
def ref_trivecs_unsafe(self, box):
box = np.asarray(box, dtype=np.float64)
x, y, z, a, b, c = box
# detect orthogonal boxes:
if a == 90 and b == 90 and c == 90:
ref = np.diag(box[:3].astype(np.float32))
else:
ref = np.zeros((3, 3), dtype=np.float64)
cos_a = 0.0 if a == 90 else np.cos(np.deg2rad(a))
cos_b = 0.0 if b == 90 else np.cos(np.deg2rad(b))
cos_c = 0.0 if c == 90 else np.cos(np.deg2rad(c))
sin_c = 1.0 if c == 90 else np.sin(np.deg2rad(c))
ref[0, 0] = x
ref[1, 0] = y * cos_c
ref[1, 1] = y * sin_c
ref[2, 0] = z * cos_b
ref[2, 1] = z * (cos_a - cos_b * cos_c) / sin_c
ref[2, 2] = np.sqrt(z * z - ref[2, 0] ** 2 - ref[2, 1] ** 2)
ref = ref.astype(np.float32)
return ref
def ref_tribox(self, tri_vecs):
tri_vecs = tri_vecs.astype(np.float64)
x, y, z = np.linalg.norm(tri_vecs, axis=1)
a = np.rad2deg(np.arccos(np.dot(tri_vecs[1], tri_vecs[2]) / (y * z)))
b = np.rad2deg(np.arccos(np.dot(tri_vecs[0], tri_vecs[2]) / (x * z)))
c = np.rad2deg(np.arccos(np.dot(tri_vecs[0], tri_vecs[1]) / (x * y)))
box = np.array([x, y, z, a, b, c], dtype=np.float32)
if not (np.all(box > 0) and a < 180 and b < 180 and c < 180):
box = np.zeros(6, dtype=np.float32)
return box
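# For example, an orthorhombic box [x, y, z, 90, 90, 90] maps to the
# diagonal matrix diag(x, y, z), and valid boxes survive the round trip
# triclinic_box(*triclinic_vectors(box)), as the cycle tests below verify.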
@pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
@pytest.mark.parametrize('angles',
comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
def test_triclinic_vectors(self, lengths, angles):
box = lengths + angles
ref = self.ref_trivecs(box)
res = mdamath.triclinic_vectors(box)
assert_array_equal(res, ref)
# check for default dtype:
assert res.dtype == np.float32
# belts and braces, make sure upper triangle is always zero:
assert not(res[0, 1] or res[0, 2] or res[1, 2])
@pytest.mark.parametrize('alpha', (60, 90))
@pytest.mark.parametrize('beta', (60, 90))
@pytest.mark.parametrize('gamma', (60, 90))
def test_triclinic_vectors_right_angle_zeros(self, alpha, beta, gamma):
angles = [alpha, beta, gamma]
box = [10, 20, 30] + angles
mat = mdamath.triclinic_vectors(box)
if 90 in angles:
if gamma == 90:
assert not mat[1, 0]
if alpha == 90:
assert not mat[2, 1]
if beta == 90:
assert not mat[2, 0]
else:
assert mat[2, 0]
else:
assert mat[2, 1]
else:
assert mat[1, 0]
if beta == 90:
assert not mat[2, 0]
if alpha == 90:
assert not mat[2, 1]
else:
assert mat[2, 1]
else:
assert mat[2, 0]
# 2, 1 cannot be zero here regardless of alpha
assert mat[2, 1]
else:
assert mat[1, 0] and mat[2, 0] and mat[2, 1]
@pytest.mark.parametrize('dtype', (int, float, np.float32, np.float64))
def test_triclinic_vectors_retval(self, dtype):
# valid box
box = [1, 1, 1, 70, 80, 90]
res = mdamath.triclinic_vectors(box, dtype=dtype)
assert res.shape == (3, 3)
assert res.dtype == dtype
# zero box
box = [0, 0, 0, 0, 0, 0]
res = mdamath.triclinic_vectors(box, dtype=dtype)
assert res.shape == (3, 3)
assert res.dtype == dtype
assert np.all(res == 0)
# invalid box angles
box = [1, 1, 1, 40, 40, 90]
res = mdamath.triclinic_vectors(box, dtype=dtype)
assert res.shape == (3, 3)
assert res.dtype == dtype
assert np.all(res == 0)
# invalid box lengths:
box = [-1, 1, 1, 70, 80, 90]
res = mdamath.triclinic_vectors(box, dtype=dtype)
assert res.shape == (3, 3)
assert res.dtype == dtype
assert np.all(res == 0)
def test_triclinic_vectors_box_cycle(self):
max_error = 0.0
for a in range(10, 91, 10):
for b in range(10, 91, 10):
for g in range(10, 91, 10):
ref = np.array([1, 1, 1, a, b, g], dtype=np.float32)
res = mdamath.triclinic_box(
*mdamath.triclinic_vectors(ref))
if not np.all(res == 0.0):
assert_almost_equal(res, ref, 5)
@pytest.mark.parametrize('angles', ([70, 70, 70],
[70, 70, 90],
[70, 90, 70],
[90, 70, 70],
[70, 90, 90],
[90, 70, 90],
[90, 90, 70]))
def test_triclinic_vectors_box_cycle_exact(self, angles):
# These cycles were inexact prior to PR #2201
ref = np.array([10.1, 10.1, 10.1] + angles, dtype=np.float32)
res = mdamath.triclinic_box(*mdamath.triclinic_vectors(ref))
assert_allclose(res, ref)
@pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
@pytest.mark.parametrize('angles',
comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
def test_triclinic_box(self, lengths, angles):
tri_vecs = self.ref_trivecs_unsafe(lengths + angles)
ref = self.ref_tribox(tri_vecs)
res = mdamath.triclinic_box(*tri_vecs)
assert_array_equal(res, ref)
assert res.dtype == ref.dtype
@pytest.mark.parametrize('lengths', comb_wr([-1, 0, 1, 2], 3))
@pytest.mark.parametrize('angles',
comb_wr([-10, 0, 20, 70, 90, 120, 180], 3))
def test_box_volume(self, lengths, angles):
box = np.array(lengths + angles, dtype=np.float32)
assert_almost_equal(mdamath.box_volume(box),
np.linalg.det(self.ref_trivecs(box)),
decimal=5)
def test_sarrus_det(self):
comb = comb_wr(np.linspace(-133.7, 133.7, num=5), 9)
# test array of matrices:
matrix = np.array(tuple(comb)).reshape((-1, 5, 3, 3))
ref = np.linalg.det(matrix)
res = mdamath.sarrus_det(matrix)
assert_almost_equal(res, ref, 7)
assert ref.dtype == res.dtype == np.float64
# test single matrices:
matrix = matrix.reshape(-1, 3, 3)
ref = ref.ravel()
res = np.array([mdamath.sarrus_det(m) for m in matrix])
assert_almost_equal(res, ref, 7)
assert ref.dtype == res.dtype == np.float64
@pytest.mark.parametrize('shape', ((0,), (3, 2), (2, 3), (1, 1, 3, 1)))
def test_sarrus_det_wrong_shape(self, shape):
matrix = np.zeros(shape)
with pytest.raises(ValueError):
mdamath.sarrus_det(matrix)
class TestMakeWhole(object):
"""Set up a simple system:
+-----------+
| |
| 6 3 | 6
| ! ! | !
|-5-8 1-2-|-5-8
| ! ! | !
| 7 4 | 7
| |
+-----------+
"""
prec = 5
@pytest.fixture()
def universe(self):
universe = mda.Universe(Make_Whole)
bondlist = [(0, 1), (1, 2), (1, 3), (1, 4), (4, 5), (4, 6), (4, 7)]
universe.add_TopologyAttr(Bonds(bondlist))
return universe
def test_return_value(self, universe):
ag = universe.residues[0].atoms
orig_pos = ag.positions.copy()
retval = mdamath.make_whole(ag)
assert retval.dtype == np.float32
assert_array_equal(ag.positions, retval)
assert np.any(ag.positions != orig_pos)
def test_single_atom_no_bonds(self):
# Call make_whole on single atom with no bonds, shouldn't move
u = mda.Universe(Make_Whole)
# Atom0 is isolated
bondlist = [(1, 2), (1, 3), (1, 4), (4, 5), (4, 6), (4, 7)]
u.add_TopologyAttr(Bonds(bondlist))
ag = u.atoms[[0]]
refpos = ag.positions.copy()
mdamath.make_whole(ag)
assert_array_equal(ag.positions, refpos) # must be untouched
def test_empty_ag(self, universe):
ag = mda.AtomGroup([], universe)
retval = mdamath.make_whole(ag)
assert retval.dtype == np.float32
assert_array_equal(retval, np.empty((0, 3), dtype=np.float32))
def test_scrambled_ag(self, universe):
# if order of atomgroup is mixed
ag = universe.atoms[[1, 3, 2, 4, 0, 6, 5, 7]]
mdamath.make_whole(ag)
# artificial system which uses 1nm bonds, so
# largest bond should be 20A
assert ag.bonds.values().max() < 20.1
def test_out_of_place(self, universe):
ag = universe.residues[0].atoms
orig_pos = ag.positions.copy()
mdamath.make_whole(ag, inplace=False)
# positions must be untouched:
assert_array_equal(ag.positions, orig_pos)
def test_double_precision_box(self):
# This test could in principle be removed since PR #2213
# universe with double precision box containing a 2-atom molecule
# broken across a corner:
u = mda.Universe.empty(
n_atoms=2,
n_residues=1,
n_segments=1,
atom_resindex=[0, 0],
residue_segindex=[0],
trajectory=True,
velocities=False,
forces=False)
ts = u.trajectory.ts
ts.frame = 0
ts.dimensions = [10, 10, 10, 90, 90, 90]
# assert ts.dimensions.dtype == np.float64
# not applicable since #2213
ts.positions = np.array([[1, 1, 1, ], [9, 9, 9]], dtype=np.float32)
u.add_TopologyAttr(Bonds([(0, 1)]))
mdamath.make_whole(u.atoms)
assert_array_almost_equal(u.atoms.positions,
np.array([[1, 1, 1, ], [-1, -1, -1]],
dtype=np.float32))
@staticmethod
@pytest.fixture()
def ag(universe):
return universe.residues[0].atoms
def test_no_bonds(self):
# NoData caused by no bonds
universe = mda.Universe(Make_Whole)
ag = universe.residues[0].atoms
with pytest.raises(NoDataError):
mdamath.make_whole(ag)
def test_zero_box_size(self, universe, ag):
universe.dimensions = [0., 0., 0., 90., 90., 90.]
with pytest.raises(ValueError):
mdamath.make_whole(ag)
def test_wrong_reference_atom(self, universe, ag):
# Reference atom not in atomgroup
with pytest.raises(ValueError):
mdamath.make_whole(ag, reference_atom=universe.atoms[-1])
def test_impossible_solve(self, universe):
# check that the algorithm sees the bad walk
with pytest.raises(ValueError):
mdamath.make_whole(universe.atoms)
def test_solve_1(self, universe, ag):
# regular usage of function
refpos = universe.atoms[:4].positions.copy()
mdamath.make_whole(ag)
assert_array_almost_equal(universe.atoms[:4].positions, refpos)
assert_array_almost_equal(universe.atoms[4].position,
np.array([110.0, 50.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[5].position,
np.array([110.0, 60.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[6].position,
np.array([110.0, 40.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[7].position,
np.array([120.0, 50.0, 0.0]), decimal=self.prec)
def test_solve_2(self, universe, ag):
# use but specify the center atom
refpos = universe.atoms[4:8].positions.copy()
mdamath.make_whole(ag, reference_atom=universe.residues[0].atoms[4])
assert_array_almost_equal(universe.atoms[4:8].positions, refpos)
assert_array_almost_equal(universe.atoms[0].position,
np.array([-20.0, 50.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[1].position,
np.array([-10.0, 50.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[2].position,
np.array([-10.0, 60.0, 0.0]), decimal=self.prec)
assert_array_almost_equal(universe.atoms[3].position,
np.array([-10.0, 40.0, 0.0]), decimal=self.prec)
def test_solve_3(self, universe):
# put in a chunk that doesn't need any work
refpos = universe.atoms[:1].positions.copy()
mdamath.make_whole(universe.atoms[:1])
assert_array_almost_equal(universe.atoms[:1].positions, refpos)
def test_solve_4(self, universe):
# Put in only some of a fragment,
# check that not everything gets moved
chunk = universe.atoms[:7]
refpos = universe.atoms[7].position.copy()
mdamath.make_whole(chunk)
assert_array_almost_equal(universe.atoms[7].position, refpos)
assert_array_almost_equal(universe.atoms[4].position,
np.array([110.0, 50.0, 0.0]))
assert_array_almost_equal(universe.atoms[5].position,
np.array([110.0, 60.0, 0.0]))
assert_array_almost_equal(universe.atoms[6].position,
np.array([110.0, 40.0, 0.0]))
def test_double_frag_short_bonds(self, universe, ag):
# previous bug where if two fragments are given
# but all bonds were short, the algorithm didn't
# complain
mdamath.make_whole(ag)
with pytest.raises(ValueError):
mdamath.make_whole(universe.atoms)
def test_make_whole_triclinic(self):
u = mda.Universe(TPR, GRO)
thing = u.select_atoms('not resname SOL NA+')
mdamath.make_whole(thing)
blengths = thing.bonds.values()
assert blengths.max() < 2.0
def test_make_whole_fullerene(self):
# lots of circular bonds as a nice pathological case
u = mda.Universe(fullerene)
bbox = u.atoms.bbox()
u.dimensions = np.r_[bbox[1] - bbox[0], [90]*3]
blengths = u.atoms.bonds.values()
# kaboom
u.atoms[::2].translate([u.dimensions[0], -2 * u.dimensions[1], 0.0])
u.atoms[1::2].translate(
[0.0, 7 * u.dimensions[1], -5 * u.dimensions[2]])
mdamath.make_whole(u.atoms)
assert_array_almost_equal(
u.atoms.bonds.values(), blengths, decimal=self.prec)
def test_make_whole_multiple_molecules(self):
u = mda.Universe(two_water_gro, guess_bonds=True)
for f in u.atoms.fragments:
mdamath.make_whole(f)
assert u.atoms.bonds.values().max() < 2.0
class Class_with_Caches(object):
def __init__(self):
self._cache = dict()
self.ref1 = 1.0
self.ref2 = 2.0
self.ref3 = 3.0
self.ref4 = 4.0
self.ref5 = 5.0
self.ref6 = 6.0
# For universe-validated caches
# One-line lambda-like class
self.universe = type('Universe', (), dict())()
self.universe._cache = {'_valid': {}}
@cached('val1')
def val1(self):
return self.ref1
# Do one with property decorator as these are used together often
@property
@cached('val2')
def val2(self):
return self.ref2
# Check use of property setters
@property
@cached('val3')
def val3(self):
return self.ref3
@val3.setter
def val3(self, new):
self._clear_caches('val3')
self._fill_cache('val3', new)
@val3.deleter
def val3(self):
self._clear_caches('val3')
# Check that args are passed through to underlying functions
@cached('val4')
def val4(self, n1, n2):
return self._init_val_4(n1, n2)
def _init_val_4(self, m1, m2):
return self.ref4 + m1 + m2
# Args and Kwargs
@cached('val5')
def val5(self, n, s=None):
return self._init_val_5(n, s=s)
def _init_val_5(self, n, s=None):
return n * s
# Property decorator and universe-validated cache
@property
@cached('val6', universe_validation=True)
def val6(self):
return self.ref5 + 1.0
# These are designed to mimic the AG and Universe cache methods
def _clear_caches(self, *args):
if len(args) == 0:
self._cache = dict()
else:
for name in args:
try:
del self._cache[name]
except KeyError:
pass
def _fill_cache(self, name, value):
self._cache[name] = value
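# A sketch of the behaviour under test: the first access of val2 calls the
# decorated method, stores the result in self._cache['val2'], and later
# accesses return the cached value until _clear_caches('val2') is called.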
class TestCachedDecorator(object):
@pytest.fixture()
def obj(self):
return Class_with_Caches()
def test_val1_lookup(self, obj):
obj._clear_caches()
assert 'val1' not in obj._cache
assert obj.val1() == obj.ref1
ret = obj.val1()
assert 'val1' in obj._cache
assert obj._cache['val1'] == ret
assert obj.val1() is obj._cache['val1']
def test_val1_inject(self, obj):
# Put something else into the cache and check it gets returned
# this tests that the cache is blindly being used
obj._clear_caches()
ret = obj.val1()
assert 'val1' in obj._cache
assert ret == obj.ref1
new = 77.0
obj._fill_cache('val1', new)
assert obj.val1() == new
# Managed property
def test_val2_lookup(self, obj):
obj._clear_caches()
assert 'val2' not in obj._cache
assert obj.val2 == obj.ref2
ret = obj.val2
assert 'val2' in obj._cache
assert obj._cache['val2'] == ret
def test_val2_inject(self, obj):
obj._clear_caches()
ret = obj.val2
assert 'val2' in obj._cache
assert ret == obj.ref2
new = 77.0
obj._fill_cache('val2', new)
assert obj.val2 == new
# Setter on cached attribute
def test_val3_set(self, obj):
obj._clear_caches()
assert obj.val3 == obj.ref3
new = 99.0
obj.val3 = new
assert obj.val3 == new
assert obj._cache['val3'] == new
def test_val3_del(self, obj):
# Check that deleting the property removes it from cache,
obj._clear_caches()
assert obj.val3 == obj.ref3
assert 'val3' in obj._cache
del obj.val3
assert 'val3' not in obj._cache
# But allows it to work as usual afterwards
assert obj.val3 == obj.ref3
assert 'val3' in obj._cache
# Pass args
def test_val4_args(self, obj):
obj._clear_caches()
assert obj.val4(1, 2) == 1 + 2 + obj.ref4
# Further calls should yield the old result
# this arguably shouldn't be cached...
assert obj.val4(3, 4) == 1 + 2 + obj.ref4
# Pass args and kwargs
def test_val5_kwargs(self, obj):
obj._clear_caches()
assert obj.val5(5, s='abc') == 5 * 'abc'
assert obj.val5(5, s='!!!') == 5 * 'abc'
# property decorator, with universe validation
def test_val6_universe_validation(self, obj):
obj._clear_caches()
assert not hasattr(obj, '_cache_key')
assert 'val6' not in obj._cache
assert 'val6' not in obj.universe._cache['_valid']
ret = obj.val6 # Trigger caching
assert obj.val6 == obj.ref6
assert ret is obj.val6
assert 'val6' in obj._cache
assert 'val6' in obj.universe._cache['_valid']
assert obj._cache_key in obj.universe._cache['_valid']['val6']
assert obj._cache['val6'] is ret
# Invalidate cache at universe level
obj.universe._cache['_valid']['val6'].clear()
ret2 = obj.val6
assert ret2 is obj.val6
assert ret2 is not ret
# Clear obj cache and access again
obj._clear_caches()
ret3 = obj.val6
assert ret3 is obj.val6
assert ret3 is not ret2
assert ret3 is not ret
class TestConvFloat(object):
@pytest.mark.parametrize('s, output', [
('0.45', 0.45),
('.45', 0.45),
('a.b', 'a.b')
])
def test_float(self, s, output):
assert util.conv_float(s) == output
@pytest.mark.parametrize('input, output', [
(('0.45', '0.56', '6.7'), [0.45, 0.56, 6.7]),
(('0.45', 'a.b', '!!'), [0.45, 'a.b', '!!'])
])
def test_map(self, input, output):
ret = [util.conv_float(el) for el in input]
assert ret == output
class TestFixedwidthBins(object):
def test_keys(self):
ret = util.fixedwidth_bins(0.5, 1.0, 2.0)
for k in ['Nbins', 'delta', 'min', 'max']:
assert k in ret
def test_ValueError(self):
with pytest.raises(ValueError):
util.fixedwidth_bins(0.1, 5.0, 4.0)
@pytest.mark.parametrize(
'delta, xmin, xmax, output_Nbins, output_delta, output_min, output_max',
[
(0.1, 4.0, 5.0, 10, 0.1, 4.0, 5.0),
(0.4, 4.0, 5.0, 3, 0.4, 3.9, 5.1)
])
def test_usage(self, delta, xmin, xmax, output_Nbins, output_delta,
output_min, output_max):
ret = util.fixedwidth_bins(delta, xmin, xmax)
assert ret['Nbins'] == output_Nbins
assert ret['delta'] == output_delta
assert ret['min'] == output_min
assert ret['max'] == output_max
@pytest.fixture
def atoms():
from MDAnalysisTests import make_Universe
u = make_Universe(extras=("masses",), size=(3, 1, 1))
return u.atoms
@pytest.mark.parametrize('weights,result',
[
(None, None),
("mass", np.array([5.1, 4.2, 3.3])),
(np.array([12.0, 1.0, 12.0]),
np.array([12.0, 1.0, 12.0])),
([12.0, 1.0, 12.0], np.array([12.0, 1.0, 12.0])),
(range(3), np.arange(3, dtype=int)),
])
def test_check_weights_ok(atoms, weights, result):
assert_array_equal(util.get_weights(atoms, weights), result)
@pytest.mark.parametrize('weights',
[42,
"geometry",
np.array(1.0),
])
def test_check_weights_raises_ValueError(atoms, weights):
with pytest.raises(ValueError):
util.get_weights(atoms, weights)
@pytest.mark.parametrize('weights',
[
np.array([12.0, 1.0, 12.0, 1.0]),
[12.0, 1.0],
np.array([[12.0, 1.0, 12.0]]),
np.array([[12.0, 1.0, 12.0], [12.0, 1.0, 12.0]]),
])
def test_check_weights_wrong_shape_raises_ValueError(atoms, weights):
with pytest.raises(ValueError):
util.get_weights(atoms, weights)
class TestGuessFormat(object):
"""Test guessing of format from filenames
Tests also getting the appropriate Parser and Reader from a
given filename
"""
# list of known formats, followed by the desired Parser and Reader
# None indicates that there isn't a Reader for this format
# All formats call fallback to the MinimalParser
formats = [
('CHAIN', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.chain.ChainReader),
('CONFIG', mda.topology.DLPolyParser.ConfigParser,
mda.coordinates.DLPoly.ConfigReader),
('CRD', mda.topology.CRDParser.CRDParser, mda.coordinates.CRD.CRDReader),
('DATA', mda.topology.LAMMPSParser.DATAParser,
mda.coordinates.LAMMPS.DATAReader),
('DCD', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.DCD.DCDReader),
('DMS', mda.topology.DMSParser.DMSParser, mda.coordinates.DMS.DMSReader),
('GMS', mda.topology.GMSParser.GMSParser, mda.coordinates.GMS.GMSReader),
('GRO', mda.topology.GROParser.GROParser, mda.coordinates.GRO.GROReader),
('HISTORY', mda.topology.DLPolyParser.HistoryParser,
mda.coordinates.DLPoly.HistoryReader),
('INPCRD', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.INPCRD.INPReader),
('LAMMPS', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.LAMMPS.DCDReader),
('MDCRD', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRJ.TRJReader),
('MMTF', mda.topology.MMTFParser.MMTFParser,
mda.coordinates.MMTF.MMTFReader),
('MOL2', mda.topology.MOL2Parser.MOL2Parser,
mda.coordinates.MOL2.MOL2Reader),
('NC', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRJ.NCDFReader),
('NCDF', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRJ.NCDFReader),
('PDB', mda.topology.PDBParser.PDBParser, mda.coordinates.PDB.PDBReader),
('PDBQT', mda.topology.PDBQTParser.PDBQTParser,
mda.coordinates.PDBQT.PDBQTReader),
('PRMTOP', mda.topology.TOPParser.TOPParser, None),
('PQR', mda.topology.PQRParser.PQRParser, mda.coordinates.PQR.PQRReader),
('PSF', mda.topology.PSFParser.PSFParser, None),
('RESTRT', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.INPCRD.INPReader),
('TOP', mda.topology.TOPParser.TOPParser, None),
('TPR', mda.topology.TPRParser.TPRParser, None),
('TRJ', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRJ.TRJReader),
('TRR', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRR.TRRReader),
('XML', mda.topology.HoomdXMLParser.HoomdXMLParser, None),
('XPDB', mda.topology.ExtendedPDBParser.ExtendedPDBParser,
mda.coordinates.PDB.ExtendedPDBReader),
('XTC', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.XTC.XTCReader),
('XYZ', mda.topology.XYZParser.XYZParser, mda.coordinates.XYZ.XYZReader),
('TRZ', mda.topology.MinimalParser.MinimalParser,
mda.coordinates.TRZ.TRZReader),
]
# list of possible compressed extensions
# include no extension too!
compressed_extensions = ['.bz2', '.gz']
@pytest.mark.parametrize('extention',
[format_tuple[0].upper() for format_tuple in
formats] +
[format_tuple[0].lower() for format_tuple in
formats])
def test_get_extention(self, extention):
"""Check that get_ext works"""
file_name = 'file.{0}'.format(extention)
a, b = util.get_ext(file_name)
assert a == 'file'
assert b == extention.lower()
@pytest.mark.parametrize('extention',
[format_tuple[0].upper() for format_tuple in
formats] +
[format_tuple[0].lower() for format_tuple in
formats])
def test_compressed_without_compression_extention(self, extention):
"""Check that format suffixed by compressed extension works"""
file_name = 'file.{0}'.format(extention)
a = util.format_from_filename_extension(file_name)
# expect answer to always be uppercase
assert a == extention.upper()
@pytest.mark.parametrize('extention',
[format_tuple[0].upper() for format_tuple in
formats] +
[format_tuple[0].lower() for format_tuple in
formats])
@pytest.mark.parametrize('compression_extention', compressed_extensions)
def test_compressed(self, extention, compression_extention):
"""Check that format suffixed by compressed extension works"""
file_name = 'file.{0}{1}'.format(extention, compression_extention)
a = util.format_from_filename_extension(file_name)
# expect answer to always be uppercase
assert a == extention.upper()
@pytest.mark.parametrize('extention',
[format_tuple[0].upper() for format_tuple in
formats] + [format_tuple[0].lower() for
format_tuple in formats])
def test_guess_format(self, extention):
file_name = 'file.{0}'.format(extention)
a = util.guess_format(file_name)
# expect answer to always be uppercase
assert a == extention.upper()
@pytest.mark.parametrize('extention',
[format_tuple[0].upper() for format_tuple in
formats] + [format_tuple[0].lower() for
format_tuple in formats])
@pytest.mark.parametrize('compression_extention', compressed_extensions)
def test_guess_format_compressed(self, extention, compression_extention):
file_name = 'file.{0}{1}'.format(extention, compression_extention)
a = util.guess_format(file_name)
# expect answer to always be uppercase
assert a == extention.upper()
@pytest.mark.parametrize('extention, parser',
[(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[1] is not None]
)
def test_get_parser(self, extention, parser):
file_name = 'file.{0}'.format(extention)
a = mda.topology.core.get_parser_for(file_name)
assert a == parser
@pytest.mark.parametrize('extention, parser',
[(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[1] is not None]
)
@pytest.mark.parametrize('compression_extention', compressed_extensions)
def test_get_parser_compressed(self, extention, parser,
compression_extention):
file_name = 'file.{0}{1}'.format(extention, compression_extention)
a = mda.topology.core.get_parser_for(file_name)
assert a == parser
@pytest.mark.parametrize('extention',
[(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[1] is None]
)
def test_get_parser_invalid(self, extention):
file_name = 'file.{0}'.format(extention)
with pytest.raises(ValueError):
mda.topology.core.get_parser_for(file_name)
@pytest.mark.parametrize('extention, reader',
[(format_tuple[0], format_tuple[2]) for
format_tuple in formats if
format_tuple[2] is not None]
)
def test_get_reader(self, extention, reader):
file_name = 'file.{0}'.format(extention)
a = mda.coordinates.core.get_reader_for(file_name)
assert a == reader
@pytest.mark.parametrize('extention, reader',
[(format_tuple[0], format_tuple[2]) for
format_tuple in formats if
format_tuple[2] is not None]
)
@pytest.mark.parametrize('compression_extention', compressed_extensions)
def test_get_reader_compressed(self, extention, reader,
compression_extention):
file_name = 'file.{0}{1}'.format(extention, compression_extention)
a = mda.coordinates.core.get_reader_for(file_name)
assert a == reader
@pytest.mark.parametrize('extention',
[(format_tuple[0], format_tuple[2]) for
format_tuple in formats if
format_tuple[2] is None]
)
def test_get_reader_invalid(self, extention):
file_name = 'file.{0}'.format(extention)
with pytest.raises(ValueError):
mda.coordinates.core.get_reader_for(file_name)
def test_check_compressed_format_TypeError(self):
with pytest.raises(TypeError):
util.check_compressed_format(1234, 'bz2')
def test_format_from_filename_TypeError(self):
with pytest.raises(TypeError):
util.format_from_filename_extension(1234)
def test_guess_format_stream_ValueError(self):
# This stream has no name, so can't guess format
s = StringIO('this is a very fun file')
with pytest.raises(ValueError):
util.guess_format(s)
def test_from_ndarray(self):
fn = np.zeros((3, 3))
rd = mda.coordinates.core.get_reader_for(fn)
assert rd == mda.coordinates.memory.MemoryReader
class TestUniqueRows(object):
def test_unique_rows_2(self):
a = np.array([[0, 1], [1, 2], [2, 1], [0, 1], [0, 1], [2, 1]])
assert_array_equal(util.unique_rows(a),
np.array([[0, 1], [1, 2], [2, 1]]))
def test_unique_rows_3(self):
a = np.array([[0, 1, 2], [0, 1, 2], [2, 3, 4], [0, 1, 2]])
assert_array_equal(util.unique_rows(a),
np.array([[0, 1, 2], [2, 3, 4]]))
def test_unique_rows_with_view(self):
# unique_rows doesn't work when flags['OWNDATA'] is False,
# happens when second dimension is created through broadcast
a = np.array([1, 2])
assert_array_equal(util.unique_rows(a[None, :]),
np.array([[1, 2]]))
class TestGetWriterFor(object):
def test_no_filename_argument(self):
# Does ``get_writer_for`` fail as expected when provided no
# filename arguments?
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for()
def test_precedence(self):
writer = mda.coordinates.core.get_writer_for('test.pdb', 'GRO')
assert writer == mda.coordinates.GRO.GROWriter
# Make sure ``get_writer_for`` uses *format* if provided
def test_missing_extension(self):
# Make sure ``get_writer_for`` behave as expected if *filename*
# has no extension
with pytest.raises(ValueError):
mda.coordinates.core.get_writer_for(filename='test', format=None)
def test_extension_empty_string(self):
"""
Test format=''.
Raises TypeError because format can be only None or
valid formats.
"""
with pytest.raises(ValueError):
mda.coordinates.core.get_writer_for(filename='test', format='')
def test_file_no_extension(self):
"""No format given"""
with pytest.raises(ValueError):
mda.coordinates.core.get_writer_for('outtraj')
def test_wrong_format(self):
# Make sure ``get_writer_for`` fails if the format is unknown
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for(filename="fail_me",
format='UNK')
def test_compressed_extension(self):
for ext in ('.gz', '.bz2'):
fn = 'test.gro' + ext
writer = mda.coordinates.core.get_writer_for(filename=fn)
assert writer == mda.coordinates.GRO.GROWriter
# Make sure ``get_writer_for`` works with compressed file names
def test_compressed_extension_fail(self):
for ext in ('.gz', '.bz2'):
fn = 'test.unk' + ext
# Make sure ``get_writer_for`` fails if an unknown format is compressed
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for(filename=fn)
def test_non_string_filename(self):
# Does ``get_writer_for`` fail with a non-string filename and no format?
with pytest.raises(ValueError):
mda.coordinates.core.get_writer_for(filename=StringIO(),
format=None)
def test_multiframe_failure(self):
# does ``get_writer_for`` fail with an invalid format when multiframe is not None?
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for(filename="fail_me",
format='UNK', multiframe=True)
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for(filename="fail_me",
format='UNK', multiframe=False)
def test_multiframe_nonsense(self):
with pytest.raises(ValueError):
mda.coordinates.core.get_writer_for(filename='this.gro',
multiframe='sandwich')
formats = [
# format name, related class, singleframe, multiframe
('CRD', mda.coordinates.CRD.CRDWriter, True, False),
('DATA', mda.coordinates.LAMMPS.DATAWriter, True, False),
('DCD', mda.coordinates.DCD.DCDWriter, True, True),
# ('ENT', mda.coordinates.PDB.PDBWriter, True, False),
('GRO', mda.coordinates.GRO.GROWriter, True, False),
('LAMMPS', mda.coordinates.LAMMPS.DCDWriter, True, True),
('MOL2', mda.coordinates.MOL2.MOL2Writer, True, True),
('NCDF', mda.coordinates.TRJ.NCDFWriter, True, True),
('NULL', mda.coordinates.null.NullWriter, True, True),
# ('PDB', mda.coordinates.PDB.PDBWriter, True, True), special case, done separately
('PDBQT', mda.coordinates.PDBQT.PDBQTWriter, True, False),
('PQR', mda.coordinates.PQR.PQRWriter, True, False),
('TRR', mda.coordinates.TRR.TRRWriter, True, True),
('XTC', mda.coordinates.XTC.XTCWriter, True, True),
('XYZ', mda.coordinates.XYZ.XYZWriter, True, True),
('TRZ', mda.coordinates.TRZ.TRZWriter, True, True),
]
@pytest.mark.parametrize('format, writer',
[(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[2] is True])
def test_singleframe(self, format, writer):
assert mda.coordinates.core.get_writer_for('this', format=format,
multiframe=False) == writer
@pytest.mark.parametrize('format', [(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[2] is False])
def test_singleframe_fails(self, format):
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for('this', format=format,
multiframe=False)
@pytest.mark.parametrize('format, writer',
[(format_tuple[0], format_tuple[1]) for
format_tuple in formats if
format_tuple[3] is True])
def test_multiframe(self, format, writer):
assert mda.coordinates.core.get_writer_for('this', format=format,
multiframe=True) == writer
@pytest.mark.parametrize('format',
[format_tuple[0] for format_tuple in formats if
format_tuple[3] is False])
def test_multiframe_fails(self, format):
with pytest.raises(TypeError):
mda.coordinates.core.get_writer_for('this', format=format,
multiframe=True)
def test_get_writer_for_pdb(self):
assert mda.coordinates.core.get_writer_for('this', format='PDB',
multiframe=False) == mda.coordinates.PDB.PDBWriter
assert mda.coordinates.core.get_writer_for('this', format='PDB',
multiframe=True) == mda.coordinates.PDB.MultiPDBWriter
assert mda.coordinates.core.get_writer_for('this', format='ENT',
multiframe=False) == mda.coordinates.PDB.PDBWriter
assert mda.coordinates.core.get_writer_for('this', format='ENT',
multiframe=True) == mda.coordinates.PDB.MultiPDBWriter
class TestBlocksOf(object):
def test_blocks_of_1(self):
arr = np.arange(16).reshape(4, 4)
view = util.blocks_of(arr, 1, 1)
assert view.shape == (4, 1, 1)
assert_array_almost_equal(view,
np.array([[[0]], [[5]], [[10]], [[15]]]))
# Modify the view and check that the changes are reflected in arr
view[:] = 1001
assert_array_almost_equal(arr,
np.array([[1001, 1, 2, 3],
[4, 1001, 6, 7],
[8, 9, 1001, 11],
[12, 13, 14, 1001]]))
def test_blocks_of_2(self):
arr = np.arange(16).reshape(4, 4)
view = util.blocks_of(arr, 2, 2)
assert view.shape == (2, 2, 2)
assert_array_almost_equal(view, np.array([[[0, 1], [4, 5]],
[[10, 11], [14, 15]]]))
view[0] = 100
view[1] = 200
assert_array_almost_equal(arr,
np.array([[100, 100, 2, 3],
[100, 100, 6, 7],
[8, 9, 200, 200],
[12, 13, 200, 200]]))
def test_blocks_of_3(self):
# test a non-square array
arr = np.arange(32).reshape(8, 4)
view = util.blocks_of(arr, 2, 1)
assert view.shape == (4, 2, 1)
def test_blocks_of_4(self):
# a block size exceeding the array size results in an empty view
arr = np.arange(4).reshape(2, 2)
view = util.blocks_of(arr, 3, 3)
assert view.shape == (0, 3, 3)
view[:] = 100
assert_array_equal(arr, np.arange(4).reshape(2, 2))
def test_blocks_of_ValueError(self):
arr = np.arange(16).reshape(4, 4)
with pytest.raises(ValueError):
util.blocks_of(arr, 2, 1) # blocks don't fit
with pytest.raises(ValueError):
util.blocks_of(arr[:, ::2], 2, 1) # non-contiguous input
@pytest.mark.parametrize('arr,answer', [
([2, 3, 4, 7, 8, 9, 10, 15, 16], [[2, 3, 4], [7, 8, 9, 10], [15, 16]]),
([11, 12, 13, 14, 15, 16], [[11, 12, 13, 14, 15, 16]]),
([1, 2, 2, 2, 3, 6], [[1, 2, 2, 2, 3], [6]])
])
def test_group_same_or_consecutive_integers(arr, answer):
assert_equal(util.group_same_or_consecutive_integers(arr), answer)
class TestNamespace(object):
@staticmethod
@pytest.fixture()
def ns():
return util.Namespace()
def test_getitem(self, ns):
ns.this = 42
assert ns['this'] == 42
def test_getitem_KeyError(self, ns):
with pytest.raises(KeyError):
dict.__getitem__(ns, 'this')
def test_setitem(self, ns):
ns['this'] = 42
assert ns['this'] == 42
def test_delitem(self, ns):
ns['this'] = 42
assert 'this' in ns
del ns['this']
assert 'this' not in ns
def test_delitem_AttributeError(self, ns):
with pytest.raises(AttributeError):
del ns.this
def test_setattr(self, ns):
ns.this = 42
assert ns.this == 42
def test_getattr(self, ns):
ns['this'] = 42
assert ns.this == 42
def test_getattr_AttributeError(self, ns):
with pytest.raises(AttributeError):
getattr(ns, 'this')
def test_delattr(self, ns):
ns['this'] = 42
assert 'this' in ns
del ns.this
assert 'this' not in ns
def test_eq(self, ns):
ns['this'] = 42
ns2 = util.Namespace()
ns2['this'] = 42
assert ns == ns2
def test_len(self, ns):
assert len(ns) == 0
ns['this'] = 1
ns['that'] = 2
assert len(ns) == 2
def test_iter(self, ns):
ns['this'] = 12
ns['that'] = 24
ns['other'] = 48
seen = []
for val in ns:
seen.append(val)
for val in ['this', 'that', 'other']:
assert val in seen
class TestTruncateInteger(object):
@pytest.mark.parametrize('a, b', [
((1234, 1), 4),
((1234, 2), 34),
((1234, 3), 234),
((1234, 4), 1234),
((1234, 5), 1234),
])
def test_ltruncate_int(self, a, b):
assert util.ltruncate_int(*a) == b
class TestFlattenDict(object):
def test_flatten_dict(self):
d = {
'A': {1: ('a', 'b', 'c')},
'B': {2: ('c', 'd', 'e')},
'C': {3: ('f', 'g', 'h')}
}
result = util.flatten_dict(d)
for k in result:
assert type(k) == tuple
assert len(k) == 2
assert k[0] in d
assert k[1] in d[k[0]]
assert result[k] in d[k[0]].values()
class TestStaticVariables(object):
"""Tests concerning the decorator @static_variables
"""
def test_static_variables(self):
x = [0]
@static_variables(foo=0, bar={'test': x})
def myfunc():
assert myfunc.foo == 0
assert type(myfunc.bar) is type(dict())
if 'test2' not in myfunc.bar:
myfunc.bar['test2'] = "a"
else:
myfunc.bar['test2'] += "a"
myfunc.bar['test'][0] += 1
return myfunc.bar['test']
assert hasattr(myfunc, 'foo')
assert hasattr(myfunc, 'bar')
y = myfunc()
assert y is x
assert x[0] == 1
assert myfunc.bar['test'][0] == 1
assert myfunc.bar['test2'] == "a"
x = [0]
y = myfunc()
assert y is not x
assert myfunc.bar['test'][0] == 2
assert myfunc.bar['test2'] == "aa"
class TestWarnIfNotUnique(object):
"""Tests concerning the decorator @warn_if_not_uniue
"""
def warn_msg(self, func, group, group_name):
msg = ("{}.{}(): {} {} contains duplicates. Results might be "
"biased!".format(group.__class__.__name__, func.__name__,
group_name, group.__repr__()))
return msg
def test_warn_if_not_unique(self, atoms):
# Check that the warn_if_not_unique decorator has a "static variable"
# warn_if_not_unique.warned:
assert hasattr(warn_if_not_unique, 'warned')
assert warn_if_not_unique.warned is False
def test_warn_if_not_unique_once_outer(self, atoms):
# Construct a scenario with two nested functions, each one decorated
# with @warn_if_not_unique:
@warn_if_not_unique
def inner(group):
if not group.isunique:
# The outer function has already issued the warning, so the inner
# call must not warn again; warn_if_not_unique.warned reflects that:
assert warn_if_not_unique.warned is True
return 0
@warn_if_not_unique
def outer(group):
return inner(group)
# Check that no warning is raised for a unique group:
assert atoms.isunique
with pytest.warns(None) as w:
x = outer(atoms)
assert x == 0
assert not w.list
# Check that a warning is raised for a group with duplicates:
ag = atoms + atoms[0]
msg = self.warn_msg(outer, ag, "'ag'")
with pytest.warns(DuplicateWarning) as w:
assert warn_if_not_unique.warned is False
x = outer(ag)
# Assert that the "warned" state is restored:
assert warn_if_not_unique.warned is False
# Check correct function execution:
assert x == 0
# Only one warning must have been raised:
assert len(w) == 1
# For whatever reason pytest.warns(DuplicateWarning, match=msg)
# doesn't work, so we compare the recorded warning message instead:
assert w[0].message.args[0] == msg
# Make sure the warning uses the correct stacklevel and references
# this file instead of MDAnalysis/lib/util.py:
assert w[0].filename == __file__
def test_warned_state_restored_on_failure(self, atoms):
# A decorated function raising an exception:
@warn_if_not_unique
def thisfails(group):
raise ValueError()
ag = atoms + atoms[0]
msg = self.warn_msg(thisfails, ag, "'ag'")
with pytest.warns(DuplicateWarning) as w:
assert warn_if_not_unique.warned is False
with pytest.raises(ValueError):
thisfails(ag)
# Assert that the "warned" state is restored despite `thisfails`
# raising an exception:
assert warn_if_not_unique.warned is False
assert len(w) == 1
assert w[0].message.args[0] == msg
assert w[0].filename == __file__
def test_warn_if_not_unique_once_inner(self, atoms):
# Construct a scenario with two nested functions, each one decorated
# with @warn_if_not_unique, but the outer function adds a duplicate
# to the group:
@warn_if_not_unique
def inner(group):
return 0
@warn_if_not_unique
def outer(group):
dupgroup = group + group[0]
return inner(dupgroup)
# Check that even though outer() is called the warning is raised for
# inner():
msg = self.warn_msg(inner, atoms + atoms[0], "'dupgroup'")
with pytest.warns(DuplicateWarning) as w:
assert warn_if_not_unique.warned is False
x = outer(atoms)
# Assert that the "warned" state is restored:
assert warn_if_not_unique.warned is False
# Check correct function execution:
assert x == 0
# Only one warning must have been raised:
assert len(w) == 1
assert w[0].message.args[0] == msg
assert w[0].filename == __file__
def test_warn_if_not_unique_multiple_references(self, atoms):
ag = atoms + atoms[0]
aag = ag
aaag = aag
@warn_if_not_unique
def func(group):
return group.isunique
# Check that the warning message contains the names of all references to
# the group in alphabetic order:
msg = self.warn_msg(func, ag, "'aaag' a.k.a. 'aag' a.k.a. 'ag'")
with pytest.warns(DuplicateWarning) as w:
x = func(ag)
# Assert that the "warned" state is restored:
assert warn_if_not_unique.warned is False
# Check correct function execution:
assert x is False
# Check warning message:
assert w[0].message.args[0] == msg
# Check correct file referenced:
assert w[0].filename == __file__
def test_warn_if_not_unique_unnamed(self, atoms):
@warn_if_not_unique
def func(group):
pass
msg = self.warn_msg(func, atoms + atoms[0],
"'unnamed {}'".format(atoms.__class__.__name__))
with pytest.warns(DuplicateWarning) as w:
func(atoms + atoms[0])
# Check warning message:
assert w[0].message.args[0] == msg
def test_warn_if_not_unique_fails_for_non_groupmethods(self):
@warn_if_not_unique
def func(group):
pass
class dummy(object):
pass
with pytest.raises(AttributeError):
func(dummy())
def test_filter_duplicate_with_userwarning(self, atoms):
@warn_if_not_unique
def func(group):
pass
with warnings.catch_warnings(record=True) as record:
warnings.resetwarnings()
warnings.filterwarnings("ignore", category=UserWarning)
with pytest.warns(None) as w:
func(atoms)
assert not w.list
assert len(record) == 0
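# A minimal sketch of the state handling the tests above exercise (an
# assumption for illustration, not MDAnalysis' actual code): the decorator
# keeps a shared "warned" flag on itself and restores it in a try/finally
# block, so the flag is reset even when the wrapped function raises.
import functools
import warnings

def warn_if_not_unique_sketch(func):
    @functools.wraps(func)
    def wrapper(group, *args, **kwargs):
        warned_here = False
        if warn_if_not_unique_sketch.warned is False and not group.isunique:
            warn_if_not_unique_sketch.warned = True
            warned_here = True
            warnings.warn("group contains duplicates", stacklevel=2)
        try:
            return func(group, *args, **kwargs)
        finally:
            if warned_here:
                # restore the state even if func() raised
                warn_if_not_unique_sketch.warned = False
    return wrapper

warn_if_not_unique_sketch.warned = False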
class TestCheckCoords(object):
"""Tests concerning the decorator @check_coords
"""
prec = 6
def test_default_options(self):
a_in = np.zeros(3, dtype=np.float32)
b_in = np.ones(3, dtype=np.float32)
b_in2 = np.ones((2, 3), dtype=np.float32)
@check_coords('a', 'b')
def func(a, b):
# check that enforce_copy is True by default:
assert a is not a_in
assert b is not b_in
# check that convert_single is True by default:
assert a.shape == (1, 3)
assert b.shape == (1, 3)
return a + b
# check that allow_single is True by default:
res = func(a_in, b_in)
# check that reduce_result_if_single is True by default:
assert res.shape == (3,)
# check correct function execution:
assert_array_equal(res, b_in)
# check that check_lengths_match is True by default:
with pytest.raises(ValueError):
res = func(a_in, b_in2)
def test_enforce_copy(self):
a_2d = np.ones((1, 3), dtype=np.float32)
b_1d = np.zeros(3, dtype=np.float32)
c_2d = np.zeros((1, 6), dtype=np.float32)[:, ::2]
d_2d = np.zeros((1, 3), dtype=np.int64)
@check_coords('a', 'b', 'c', 'd', enforce_copy=False)
def func(a, b, c, d):
# Assert that if enforce_copy is False:
# no copy is made if input shape, order, and dtype are correct:
assert a is a_2d
# a copy is made if input shape has to be changed:
assert b is not b_1d
# a copy is made if input order has to be changed:
assert c is not c_2d
# a copy is made if input dtype has to be changed:
assert d is not d_2d
# Assert correct dtype conversion:
assert d.dtype == np.float32
assert_almost_equal(d, d_2d, self.prec)
# Assert all shapes are converted to (1, 3):
assert a.shape == b.shape == c.shape == d.shape == (1, 3)
return a + b + c + d
# Call func() to:
# - test the above assertions
# - ensure that input of single coordinates is simultaneously possible
# with different shapes (3,) and (1, 3)
res = func(a_2d, b_1d, c_2d, d_2d)
# Since some inputs are not 1d, even though reduce_result_if_single is
# True, the result must have shape (1, 3):
assert res.shape == (1, 3)
# check correct function execution:
assert_array_equal(res, a_2d)
def test_no_allow_single(self):
@check_coords('a', allow_single=False)
def func(a):
pass
with pytest.raises(ValueError) as err:
func(np.zeros(3, dtype=np.float32))
assert err.msg == ("func(): a.shape must be (n, 3), got (3,).")
def test_no_convert_single(self):
a_1d = np.arange(-3, 0, dtype=np.float32)
@check_coords('a', enforce_copy=False, convert_single=False)
def func(a):
# assert no conversion and no copy were performed:
assert a is a_1d
return a
res = func(a_1d)
# Assert result has been reduced:
assert res == a_1d[0]
assert type(res) is np.float32
def test_no_reduce_result_if_single(self):
a_1d = np.zeros(3, dtype=np.float32)
# Test without shape conversion:
@check_coords('a', enforce_copy=False, convert_single=False,
reduce_result_if_single=False)
def func(a):
return a
res = func(a_1d)
# make sure the input array is just passed through:
assert res is a_1d
# Test with shape conversion:
@check_coords('a', enforce_copy=False, reduce_result_if_single=False)
def func(a):
return a
res = func(a_1d)
assert res.shape == (1, 3)
assert_array_equal(res[0], a_1d)
def test_no_check_lengths_match(self):
a_2d = np.zeros((1, 3), dtype=np.float32)
b_2d = np.zeros((3, 3), dtype=np.float32)
@check_coords('a', 'b', enforce_copy=False, check_lengths_match=False)
def func(a, b):
return a, b
res_a, res_b = func(a_2d, b_2d)
# Assert arrays are just passed through:
assert res_a is a_2d
assert res_b is b_2d
def test_invalid_input(self):
a_inv_dtype = np.array([['hello', 'world', '!']])
a_inv_type = [[0., 0., 0.]]
a_inv_shape_1d = np.zeros(6, dtype=np.float32)
a_inv_shape_2d = np.zeros((3, 2), dtype=np.float32)
@check_coords('a')
def func(a):
pass
with pytest.raises(TypeError) as err:
func(a_inv_dtype)
assert str(err.value).startswith("func(): a.dtype must be convertible to "
"float32, got ")
with pytest.raises(TypeError) as err:
func(a_inv_type)
assert err.msg == ("func(): Parameter 'a' must be a numpy.ndarray, "
"got <class 'list'>.")
with pytest.raises(ValueError) as err:
func(a_inv_shape_1d)
assert err.msg == ("func(): a.shape must be (3,) or (n, 3), got "
"(6,).")
with pytest.raises(ValueError) as err:
func(a_inv_shape_2d)
assert err.msg == ("func(): a.shape must be (3,) or (n, 3), got "
"(3, 2).")
def test_usage_with_kwargs(self):
a_2d = np.zeros((1, 3), dtype=np.float32)
@check_coords('a', enforce_copy=False)
def func(a, b, c=0):
return a, b, c
# check correct functionality if passed as keyword argument:
a, b, c = func(a=a_2d, b=0, c=1)
assert a is a_2d
assert b == 0
assert c == 1
def test_wrong_func_call(self):
@check_coords('a', enforce_copy=False)
def func(a, b, c=0):
pass
# Make sure invalid call marker is present:
func._invalid_call = False
# usage with posarg doubly defined:
assert not func._invalid_call
with pytest.raises(TypeError):
func(0, a=0) # pylint: disable=redundant-keyword-arg
assert func._invalid_call
func._invalid_call = False
# usage with missing posargs:
assert not func._invalid_call
with pytest.raises(TypeError):
func(0)
assert func._invalid_call
func._invalid_call = False
# usage with missing posargs (supplied as kwargs):
assert not func._invalid_call
with pytest.raises(TypeError):
func(a=0, c=1)
assert func._invalid_call
func._invalid_call = False
# usage with too many posargs:
assert not func._invalid_call
with pytest.raises(TypeError):
func(0, 0, 0, 0)
assert func._invalid_call
func._invalid_call = False
# usage with unexpected kwarg:
assert not func._invalid_call
with pytest.raises(TypeError):
func(a=0, b=0, c=1, d=1) # pylint: disable=unexpected-keyword-arg
assert func._invalid_call
func._invalid_call = False
def test_wrong_decorator_usage(self):
# usage without parentheses:
@check_coords
def func():
pass
with pytest.raises(TypeError):
func()
# usage without arguments:
with pytest.raises(ValueError) as err:
@check_coords()
def func():
pass
assert err.msg == ("Decorator check_coords() cannot be used "
"without positional arguments.")
# usage with defaultarg:
with pytest.raises(ValueError) as err:
@check_coords('a')
def func(a=1):
pass
assert err.msg == ("In decorator check_coords(): Name 'a' doesn't "
"correspond to any positional argument of the "
"decorated function func().")
# usage with invalid parameter name:
with pytest.raises(ValueError) as err:
@check_coords('b')
def func(a):
pass
assert err.msg == ("In decorator check_coords(): Name 'b' doesn't "
"correspond to any positional argument of the "
"decorated function func().")
@pytest.mark.parametrize("old_name", (None, "MDAnalysis.Universe"))
@pytest.mark.parametrize("new_name", (None, "Multiverse"))
@pytest.mark.parametrize("remove", (None, "99.0.0", 2099))
@pytest.mark.parametrize("message", (None, "use the new stuff"))
def test_deprecate(old_name, new_name, remove, message, release="2.7.1"):
def AlternateUniverse(anything):
# important: first line needs to be """\ so that textwrap.dedent()
# works
"""\
AlternateUniverse provides a true view of the Universe.
Parameters
----------
anything : object
Returns
-------
truth
"""
return True
oldfunc = util.deprecate(AlternateUniverse, old_name=old_name,
new_name=new_name,
release=release, remove=remove,
message=message)
# match_expr changed to match (Issue 2329)
with pytest.warns(DeprecationWarning, match="`.+` is deprecated"):
oldfunc(42)
doc = oldfunc.__doc__
name = old_name if old_name else AlternateUniverse.__name__
deprecation_line_1 = ".. deprecated:: {0}".format(release)
assert re.search(deprecation_line_1, doc)
if message:
deprecation_line_2 = message
else:
if new_name is None:
default_message = "`{0}` is deprecated!".format(name)
else:
default_message = "`{0}` is deprecated, use `{1}` instead!".format(
name, new_name)
deprecation_line_2 = default_message
assert re.search(deprecation_line_2, doc)
if remove:
deprecation_line_3 = "`{0}` will be removed in release {1}".format(
name, remove)
assert re.search(deprecation_line_3, doc)
# check that the old docs are still present
assert re.search(textwrap.dedent(AlternateUniverse.__doc__), doc)
def test_deprecate_missing_release_ValueError():
with pytest.raises(ValueError):
util.deprecate(mda.Universe)
def test_set_function_name(name="bar"):
def foo():
pass
util._set_function_name(foo, name)
assert foo.__name__ == name
@pytest.mark.parametrize("text",
("",
"one line text",
" one line with leading space",
"multiline\n\n with some\n leading space",
" multiline\n\n with all\n leading space"))
def test_dedent_docstring(text):
doc = util.dedent_docstring(text)
for line in doc.splitlines():
assert line == line.lstrip()
class TestCheckBox(object):
prec = 6
ref_ortho = np.ones(3, dtype=np.float32)
ref_tri_vecs = np.array([[1, 0, 0], [0, 1, 0], [0, 2 ** 0.5, 2 ** 0.5]],
dtype=np.float32)
@pytest.mark.parametrize('box',
([1, 1, 1, 90, 90, 90],
(1, 1, 1, 90, 90, 90),
['1', '1', 1, 90, '90', '90'],
('1', '1', 1, 90, '90', '90'),
np.array(['1', '1', 1, 90, '90', '90']),
np.array([1, 1, 1, 90, 90, 90],
dtype=np.float32),
np.array([1, 1, 1, 90, 90, 90],
dtype=np.float64),
np.array([1, 1, 1, 1, 1, 1,
90, 90, 90, 90, 90, 90],
dtype=np.float32)[::2]))
def test_check_box_ortho(self, box):
boxtype, checked_box = util.check_box(box)
assert boxtype == 'ortho'
assert_allclose(checked_box, self.ref_ortho)
assert checked_box.dtype == np.float32
assert checked_box.flags['C_CONTIGUOUS']
def test_check_box_None(self):
with pytest.raises(ValueError, match="Box is None"):
util.check_box(None)
@pytest.mark.parametrize('box',
([1, 1, 2, 45, 90, 90],
(1, 1, 2, 45, 90, 90),
['1', '1', 2, 45, '90', '90'],
('1', '1', 2, 45, '90', '90'),
np.array(['1', '1', 2, 45, '90', '90']),
np.array([1, 1, 2, 45, 90, 90],
dtype=np.float32),
np.array([1, 1, 2, 45, 90, 90],
dtype=np.float64),
np.array([1, 1, 1, 1, 2, 2,
45, 45, 90, 90, 90, 90],
dtype=np.float32)[::2]))
def test_check_box_tri_vecs(self, box):
boxtype, checked_box = util.check_box(box)
assert boxtype == 'tri_vecs'
assert_almost_equal(checked_box, self.ref_tri_vecs, self.prec)
assert checked_box.dtype == np.float32
assert checked_box.flags['C_CONTIGUOUS']
def test_check_box_wrong_data(self):
with pytest.raises(ValueError):
wrongbox = ['invalid', 1, 1, 90, 90, 90]
boxtype, checked_box = util.check_box(wrongbox)
def test_check_box_wrong_shape(self):
with pytest.raises(ValueError):
wrongbox = np.ones((3, 3), dtype=np.float32)
boxtype, checked_box = util.check_box(wrongbox)
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/lib/test_util.py | Python | gpl-2.0 | 76,830 | ["LAMMPS", "MDAnalysis"] | dc99860b9a5f3d3f5bcee39ee7d6be638d78bfa6438d2e0250e5f4f17a768f91 |
import numpy as np, SimPEG as simpeg, vtk
import vtk.util.numpy_support as npsup
# Simple write model functions.
def writeVTPFile(fileName,vtkPolyObject):
'''Function to write vtk polydata file (vtp).'''
polyWriter = vtk.vtkXMLPolyDataWriter()
if float(vtk.VTK_VERSION.split('.')[0]) >=6:
polyWriter.SetInputData(vtkPolyObject)
else:
polyWriter.SetInput(vtkPolyObject)
polyWriter.SetFileName(fileName)
polyWriter.Update()
def writeVTUFile(fileName,vtkUnstructuredGrid,compress=True):
'''Function to write vtk unstructured grid (vtu).'''
Writer = vtk.vtkXMLUnstructuredGridWriter()
if float(vtk.VTK_VERSION.split('.')[0]) >=6:
Writer.SetInputData(vtkUnstructuredGrid)
else:
Writer.SetInput(vtkUnstructuredGrid)
if not compress:
Writer.SetCompressorTypeToNone()
Writer.SetDataModeToAscii()
Writer.SetFileName(fileName)
Writer.Update()
def writeVTRFile(fileName,vtkRectilinearGrid):
'''Function to write vtk rectilinear grid (vtr).'''
Writer = vtk.vtkXMLRectilinearGridWriter()
if float(vtk.VTK_VERSION.split('.')[0]) >=6:
Writer.SetInputData(vtkRectilinearGrid)
else:
Writer.SetInput(vtkRectilinearGrid)
Writer.SetFileName(fileName)
Writer.Update()
def writeVTSFile(fileName,vtkStructuredGrid):
'''Function to write vtk structured grid (vts).'''
Writer = vtk.vtkXMLStructuredGridWriter()
if float(vtk.VTK_VERSION.split('.')[0]) >=6:
Writer.SetInputData(vtkStructuredGrid)
else:
Writer.SetInput(vtkStructuredGrid)
Writer.SetFileName(fileName)
Writer.Update()
def readVTSFile(fileName):
'''Function to read vtk structured grid (vts) and return a grid object.'''
Reader = vtk.vtkXMLStructuredGridReader()
Reader.SetFileName(fileName)
Reader.Update()
return Reader.GetOutput()
def readVTUFile(fileName):
'''Function to read vtk unstructured grid (vtu) and return a grid object.'''
Reader = vtk.vtkXMLUnstructuredGridReader()
Reader.SetFileName(fileName)
Reader.Update()
return Reader.GetOutput()
def readVTRFile(fileName):
'''Function to read vtk rectilinear grid (vtr) and return a grid object.'''
Reader = vtk.vtkXMLRectilinearGridReader()
Reader.SetFileName(fileName)
Reader.Update()
return Reader.GetOutput()
def readVTPFile(fileName):
'''Function to read vtk polydata file (vtp) and return a polydata object.'''
Reader = vtk.vtkXMLPolyDataReader()
Reader.SetFileName(fileName)
Reader.Update()
return Reader.GetOutput()
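# Example usage (a sketch; 'example.vtr' is an arbitrary file name): build a
# small rectilinear grid with the numpy support module imported above, write
# it to disk, and read it back with the helpers defined in this file.
if __name__ == '__main__':
    coords = npsup.numpy_to_vtk(np.linspace(0., 1., 5), deep=1)
    grid = vtk.vtkRectilinearGrid()
    grid.SetDimensions(5, 5, 5)
    grid.SetXCoordinates(coords)
    grid.SetYCoordinates(coords)
    grid.SetZCoordinates(coords)
    writeVTRFile('example.vtr', grid)
    readBack = readVTRFile('example.vtr')
    print(readBack.GetNumberOfPoints())  # 5 * 5 * 5 = 125 points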
| grosenkj/telluricpy | telluricpy/vtkTools/io.py | Python | mit | 2,581 | ["VTK"] | 1a4330846330786eea298aee97f95ba29516c4b793f8387f8a6bc4c0e3db8a67 |
import pytest
from FeedUnit42 import Client, get_indicators_command, fetch_indicators, sort_report_objects_by_type, parse_reports, \
match_relationships, parse_related_indicators, create_mitre_indicator
from test_data.feed_data import INDICATORS_DATA, ATTACK_PATTERN_DATA, MALWARE_DATA, RELATIONSHIP_DATA, REPORTS_DATA, \
REPORTS_INDICATORS, MATCHED_RELATIONSHIPS, ID_TO_OBJECT
@pytest.mark.parametrize('command, args, response, length', [
(get_indicators_command, {'limit': 2}, INDICATORS_DATA, 2),
(get_indicators_command, {'limit': 5}, INDICATORS_DATA, 5),
]) # noqa: E124
def test_commands(command, args, response, length, mocker):
"""Unit test
Given
- get_indicators_command func
- command args
- command raw response
When
- mock the Client's fetch_stix_objects_from_api.
Then
- convert the result to human readable table
- create the context
- validate the raw_response
"""
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', return_value=response)
command_results = command(client, args)
indicators = command_results.raw_response
assert len(indicators) == length
TYPE_TO_RESPONSE = {
'indicator': INDICATORS_DATA,
'report': REPORTS_DATA,
'attack-pattern': ATTACK_PATTERN_DATA,
'malware': MALWARE_DATA,
'campaign': [],
'relationship': RELATIONSHIP_DATA,
'course-of-action': []
}
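# The per-test mock below is repeated several times; a pytest fixture such as
# this sketch (an optional consolidation, not part of the original test file)
# could provide a pre-mocked client instead:
@pytest.fixture
def mocked_client(mocker):
    client = Client(api_key='1234', verify=False)

    def mock_get_stix_objects(test, **kwargs):
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE[type_]

    mocker.patch.object(client, 'fetch_stix_objects_from_api',
                        side_effect=mock_get_stix_objects)
    return client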
def test_fetch_indicators_command(mocker):
"""Unit test
Given
- fetch indicators command
- command args
- command raw response
When
- mock the Client's fetch_stix_objects_from_api.
Then
- run the fetch indicators command using the Client
Validate the number of indicators fetched
"""
def mock_get_stix_objects(test, **kwargs):
type_ = kwargs.get('type')
client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
indicators = fetch_indicators(client)
assert len(indicators) == 13
def test_feed_tags_param(mocker):
"""Unit test
Given
- fetch indicators command
- command args
- command raw response
When
- mock the feed tags param.
- mock the Client's fetch_stix_objects_from_api.
Then
- run the fetch indicators command using the Client
Validate the value of the tags field.
"""
def mock_get_stix_objects(test, **kwargs):
type_ = kwargs.get('type')
client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
indicators = fetch_indicators(client, ['test_tag'])
assert set(indicators[0].get('fields').get('tags')) == {'malicious-activity', 'test_tag'}
def test_fetch_indicators_with_feedrelatedindicators(mocker):
"""Unit test
Given
- fetch indicators command
- command args
- command raw response
When
- mock the Client's fetch_stix_objects_from_api.
Then
- run the fetch indicators command using the Client
Validate the connections between the indicators
"""
def mock_get_stix_objects(test, **kwargs):
type_ = kwargs.get('type')
client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
indicators = fetch_indicators(client)
for indicator in indicators:
indicator_fields = indicator.get('fields')
if indicator_fields.get('indicatoridentification') == 'indicator--010bb9ad-5686-485d-97e5-93c2187e56ce':
assert indicator_fields.get('feedrelatedindicators') == [
{
'description': 'example.com,https://attack.mitre.org/techniques/T1047,https://msdn.microsoft.com'
'/en-us/library/aa394582.aspx,https://technet.microsoft.com/en-us/library/cc787851'
'.aspx,https://en.wikipedia.org/wiki/Server_Message_Block',
'type': 'MITRE ATT&CK',
'value': 'T1047'}
]
break
def test_fetch_indicators_with_malware_reference(mocker):
"""Unit test
Given
- fetch indicators command
- command args
- command raw response
When
- mock the Client's fetch_stix_objects_from_api.
Then
- run the fetch indicators command using the Client
Validate the connections between the indicators
"""
def mock_get_stix_objects(test, **kwargs):
type_ = kwargs.get('type')
client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
indicators = fetch_indicators(client)
for indicator in indicators:
indicator_fields = indicator.get('fields')
if indicator_fields.get('indicatoridentification') == 'indicator--0025039e-f0b5-4ad2-aaab-5374fe3734be':
assert set(indicator_fields.get('malwarefamily')) == {'Muirim', 'XBash', 'Muirim2'}
break
def test_sort_reports():
"""
Given
- List of raw report objects.
When
- Parsing STIX Report indicators.
Then
- Sort the object into two types: main and sub.
"""
assert sort_report_objects_by_type(REPORTS_DATA) == ([REPORTS_DATA[0]], [REPORTS_DATA[1]])
@pytest.mark.parametrize('report, tags, tlp_color, expected',
[
(REPORTS_DATA[0], [], None, REPORTS_INDICATORS[0]),
(REPORTS_DATA[0], [], 'AMBER', REPORTS_INDICATORS[1])
])
def test_parse_reports(report, tags, tlp_color, expected):
"""
Given
- List of main raw report objects.
When
- Parsing STIX Report indicators.
Then
- Create a STIX Report indicator.
"""
assert parse_reports([report], tags, tlp_color) == expected
def test_parse_reports_relationships(mocker):
"""
Given
- STIX Report indicators.
- Relationship objects.
- Malware and Attack-Pattern objects.
When
- Parsing STIX Report indicators.
Then
- Update a STIX Report indicator with relationships' data.
"""
def mock_get_stix_objects(test, **kwargs):
type_ = kwargs.get('type')
client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
client = Client(api_key='1234', verify=False)
mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
indicators = fetch_indicators(client)
for indicator in indicators:
indicator_fields = indicator.get('fields')
if indicator_fields.get('stixid') == 'report--a':
assert set([i.get('value') for i in indicator_fields.get('feedrelatedindicators')]) == \
{'T1047', 'XBash', 'c1ec28bc82500bd70f95edcbdf9306746198bbc04a09793ca69bb87f2abdb839'}
break
def test_match_relationships():
"""
Given
- Relationship objects.
When
- Parsing indicators.
Then
- Creates a dict of relationships in the form of `id: [related_ids]`
"""
assert match_relationships(RELATIONSHIP_DATA) == (MATCHED_RELATIONSHIPS,
{'course-of-action--fd0da09e-a0b2-4018-9476-1a7edd809b59': 'No product'})
def test_parse_related_indicators():
"""
Given
- Stix report object.
- Malware objects ids related to the report.
- Dict in the form of `id: stix_object`.
When
- Parsing related indicator from Stix report object.
Then
- Creates indicators and updates the feedrelatedindicators field in the report.
"""
report = {'fields': {'feedrelatedindicators': []}}
indicators = parse_related_indicators(report, ['attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055'],
ID_TO_OBJECT, {}, {})
assert len(report['fields']['feedrelatedindicators']) == 1
assert report['fields']['feedrelatedindicators'][0]['value'] == '8.8.8.8'
assert len(indicators) == 1
assert indicators[0]['value'] == '8.8.8.8'
assert indicators[0]['fields']['mitrecourseofaction'] == 'No courses of action found.'
assert indicators[0]['fields']['mitredescription'] == 'description'
assert indicators[0]['fields']['mitrename'] == 'Software Discovery'
def test_create_mitre_indicator():
"""
Given
- Indicator value.
- Stix relationship object.
- Dict of relationships in the form of `id: list(related_ids)`.
- Dict in the form of `id: stix_object`.
- Dict connecting course-of-action ids with the relationship product.
When
- Parsing the indicator.
Then
- Creates an indicator and updates the mitrecourseofaction field with a markdown table.
"""
indicator = create_mitre_indicator('8.8.8.8',
{'id': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055'},
MATCHED_RELATIONSHIPS,
ID_TO_OBJECT,
{'course-of-action--fd0da09e-a0b2-4018-9476-1a7edd809b59': 'NGFW'})
assert indicator['value'] == '8.8.8.8'
assert indicator['type'] == 'MITRE ATT&CK'
assert indicator['fields']['mitrecourseofaction'] == '\n### NGFW\n|Name|Title|Description|\n|---|---|---|' \
'\n| Deploy XSOAR Playbook | Deploy XSOAR Playbook |' \
' Deploy XSOAR Playbook - Phishing Investigation - Generic V2 |\n'
| VirusTotal/content | Packs/FeedUnit42/Integrations/FeedUnit42/FeedUnit42_test.py | Python | mit | 9,974 | ["Amber"] | ac02382ce6bba9e289723d9813a77b5228f5805999e2e17b0a3bfdeea550a101 |
#!/usr/bin/env python
# Title: Twitter Stream to Kafka
# Create-Date: 23. Oktober 2016
# Version: 1.0
# Author: Cyrill Durrer / Christoph Haene
# Contact: http://cyrilldurrer.com
# christoph.haene@gmail.com
#
# Task: Receive Twitter Stream for Bitcoin and send to Kafka
#
# Output: Kafka Producer
#
# Kafka: First create a topic in Kafka (Path to Kafka: /usr/hdp/2.4.0.0-169/kafka/bin/)
# kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic twitterstream
##############################################################################################################################
import tweepy
import threading, logging, time
from kafka import KafkaProducer
import string
######################################################################
# Authentication details. To obtain these visit dev.twitter.com
######################################################################
consumer_key = 'insert here your Twitter Key'
consumer_secret = 'insert here your Twitter Secret'
access_token = 'insert here your Twitter Token'
access_token_secret = 'insert here your Twitter token secret'
mytopic='twitterstream' # Topic should be the same as defined in Kafka
######################################################################
#Create a handler for the streaming data that stays open...
######################################################################
class StdOutListener(tweepy.StreamListener):
#Handler
''' Handles data received from the stream. '''
######################################################################
#For each status event
######################################################################
def on_status(self, status):
# Schema changed to add the tweet text
print '%d,%d,%d,%s,%s' % (status.user.followers_count, status.user.friends_count,status.user.statuses_count, status.text, status.user.screen_name)
# status.user.location can be None, so guard it before calling replace()
message = str(status.user.followers_count) + ',' + str(status.user.friends_count) + ',' + str(status.user.statuses_count) + ',' + status.text.replace(",","\,") + ',' + status.user.screen_name + ',' + status.user.lang + ',' + (status.user.location or '').replace(",","\,") + ',' + str(status.created_at)
msg = filter(lambda x: x in string.printable, message)
try:
#write out to kafka topic
producer.send(mytopic, str(msg))
except Exception, e:
return True
return True
######################################################################
# Suppress failures to keep the script running...
######################################################################
def on_error(self, status_code):
print('Got an error with status code: ' + str(status_code))
return True # To continue listening
def on_timeout(self):
print('Timeout...')
return True # To continue listening
######################################################################
# Keep the script running: restart the stream if it fails (e.g. on NoneType errors)
######################################################################
def start_stream():
while True:
try:
stream = tweepy.Stream(auth, listener)
# api = tweepy.API(auth)
# Stream Twitter Messages with bitcoin or btc
stream.filter(track=['bitcoin', 'btc'])
except:
continue
######################################################################
#Main Loop Init
######################################################################
if __name__ == '__main__':
listener = StdOutListener()
# Sign in with the OAuth credentials
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Kafka producer configuration
producer = KafkaProducer(bootstrap_servers='spark1:6667')
# Start Streaming
start_stream()
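# Note: this script is Python 2 (print statements, "except Exception, e").
# Under Python 3, kafka-python expects bytes, so the send in on_status() would
# become something like the following sketch (same names as above, untested):
#
#     producer.send(mytopic, msg.encode('utf-8'))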
| chaene82/bigcoin | twitter2kafka/twitter2kafka.py | Python | gpl-3.0 | 4,091 | ["VisIt"] | 019f3dda46ef08f6fc07063c21b345fc76894c74f11e629f64a729021f3ffe25 |
"""
@name: Modules/Housing/Schedules/auto_update.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2019 by D. Brian Kimmel
@note: Created on Dec 31, 2013
@license: MIT License
@summary: Handle the automatic updating of PyHouse
This module automatically updates PyHouse
"""
__updated__ = '2019-10-06'
__version_info__ = (19, 5, 1)
__version__ = '.'.join(map(str, __version_info__))
# strategy:
#
# if there is a VERSION file, use its contents. otherwise, call git to
# get a version string. if that also fails, use 'latest'.
#
# Import system type stuff
import jsonpickle
# from twisted.web.client import getPage
from twisted.internet import ssl, task, protocol, endpoints
from twisted.internet.defer import Deferred, inlineCallbacks # , succeed
from twisted.internet.protocol import ClientFactory, Factory, Protocol
from twisted.protocols.basic import LineReceiver
from twisted.python.filepath import FilePath
from twisted.python.modules import getModule
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
# Import PyHouse files
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Auto_Update ')
VERSION_PATH = '../../../../../VERSION'
REPOSITORY = b'https://raw.github.com/DBrianKimmel/PyHouse/Project/Version'
REPOSITORY_PATH = 'Project/VERSION'
def _find_pyhouse_version_file():
"""
Find the normalized VERSION file name
PyHouse/Project/src/VERSION
"""
l_file = FilePath(VERSION_PATH)
return l_file
class FindLocalVersion(object):
""" Find out what version that we are.
"""
def __init__(self):
l_name = self.get_filename()
_l_version = self.get_version(l_name)
def get_filename(self):
return FilePath(VERSION_PATH)
def get_version(self, p_name):
"""
@return: a bytestream of the version
"""
l_file = p_name.open()
l_version = l_file.read()
return l_version
class GithubProtocol(Protocol):
""" A minimal protocol for the Hue Hub.
"""
m_finished = None
m_remaining = 0
def __init__(self, p_finished):
"""
@param p_finished: a Deferred fired once the response body has been received.
"""
self.m_finished = p_finished
self.m_remaining = 1024 * 10 # Allow for 10kb response
def dataReceived(self, p_bytes):
if self.m_remaining > 0:
l_display = p_bytes[:self.m_remaining].decode("utf8") # Get the string
l_json = jsonpickle.decode(l_display)
LOG.debug('\n===== Body =====\n{}\n'.format(l_json))
print('\n===== Body =====\n{}\n'.format(l_json))
self.m_remaining -= len(l_display)
def connectionLost(self, p_reason):
l_msg = p_reason.getErrorMessage() # this gives a tuple of messages (I think)
LOG.debug('Finished receiving body: {}'.format(p_reason))
print('Finished receiving body: {}'.format(p_reason))
LOG.debug('Finished receiving body: {}'.format("\t".join(str(x) for x in l_msg)))
print('Finished receiving body: {}'.format("\t".join(str(x) for x in l_msg)))
self.m_finished.callback(None)
class FindRepositoryVersion(object):
""" Check the version in the repository
"""
def __init__(self, p_pyhouse_obj):
"""
Agent is a very basic HTTP client. It supports I{HTTP} and I{HTTPS} scheme URIs.
"""
self.m_headers = Headers({'User-Agent': ['AutoUpdate Web Client']})
if p_pyhouse_obj is not None:
self.m_pyhouse_obj = p_pyhouse_obj
self.m_hue_agent = Agent(p_pyhouse_obj._Twisted.Reactor)
LOG.info('Initialized')
print('Initialized')
self.m_version = '0.0.0'
def get_uri(self):
return REPOSITORY
def get_repository(self):
""" Issue a request for information
It will arrive later via a deferred.
"""
def cb_Response(p_response):
LOG.debug('Response Code: {} {}'.format(p_response.code, p_response.phrase))
print('Response Code: {} {}'.format(p_response.code, p_response.phrase))
d_finished = Deferred()
p_response.deliverBody(GithubProtocol(d_finished))
return d_finished
l_agent_d = self.m_hue_agent.request(
b'GET',
self.get_uri(),
self.m_headers,
None)
l_agent_d.addCallback(cb_Response)
# HueDecode().decode_get()
return l_agent_d
def get_file(self):
l_file = self.get_repository()
return l_file
def print_page(self, p_html):
"""
"""
print(p_html)
pass
def get_page(self):
"""
"""
l_defer = self.get_repository()  # was self.get_uri, a bound method with no addCallback()
l_defer.addCallback(self.print_page)
def get_version(self):
return self.m_version
def parseHtml(self, p_html):
# l_parser = etree.HTMLParser(encoding='utf8')
# tree = etree.parse(StringIO.StringIO(html), parser)
# return tree
pass
def extractTitle(self, p_tree):
# titleText = unicode(tree.xpath("//title/text()")[0])
# return titleText
pass
# d = getPage('http://www.google.com')
# d.addCallback(parseHtml)
# d.addCallback(extraTitle)
# d.addBoth(println)
class lightingUtility(object):
"""
"""
def compare_versions(self, _p_local_ver, _p_repos_ver):
return True
class EchoClient(LineReceiver):
end = b"Bye-bye!"
def connectionMade(self):
self.sendLine(b"Hello, world!")
self.sendLine(b"What a fine day it is.")
self.sendLine(self.end)
def lineReceived(self, line):
print("receive:", line)
if line == self.end:
self.transport.loseConnection()
class EchoClientFactory(ClientFactory):
protocol = EchoClient
def __init__(self):
self.done = Deferred()
def clientConnectionFailed(self, connector, reason):
print('connection failed:', reason.getErrorMessage())
self.done.errback(reason)
def clientConnectionLost(self, connector, reason):
print('connection lost:', reason.getErrorMessage())
self.done.callback(None)
class Api(lightingUtility):
"""
"""
@inlineCallbacks
def do_ssl(self, p_reactor):
l_factory = Factory.forProtocol(EchoClient)  # EchoClient is module-level, not an attribute of Api
l_certData = getModule(__name__).filePath.sibling('public.pem').getContent()
l_authority = ssl.Certificate.loadPEM(l_certData)
l_options = ssl.optionsForClientTLS(u'example.com', l_authority)
l_endpoint = endpoints.SSL4ClientEndpoint(p_reactor, 'localhost', 8000, l_options)
echoClient = yield l_endpoint.connect(l_factory)
d_done = Deferred()
echoClient.connectionLost = lambda reason: d_done.callback(None)
yield d_done
def Start(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/House/Schedule/auto_update.py | Python | mit | 7,001 | ["Brian"] | 80476d5c670d904a4a15ca7364e6650d34a37b9117a09341df3b3d3645cef383 |
# This script chooses the last two nucleotides of allele-specific primer
import argparse,re
import glob
from Bio.Seq import Seq
import os,sys
import primer3
from operator import itemgetter
from Bio.SeqUtils import MeltingTemp as mt
import subprocess as sp
import xlsxwriter as xls
import myvariant
thisDir=os.path.dirname(os.path.realpath(__file__))+'/'
def showPercWork(done,allWork):
percDoneWork=round((done/allWork)*100,2)
sys.stdout.write("\r"+str(percDoneWork)+"%")
sys.stdout.flush()
def revComplement(nuc):
return(str(Seq(nuc).reverse_complement()))
def constructPrimers(mutPrimerSeq,wtPrimerSeq,seq,ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl):
# First choose the left primers; their 3'-end is already defined
primers=[]
primerProps={}
primerScores={}
for i in range(primerLen-primerLenDev,primerLen+primerLenDev+1):
mutPrimer=mutPrimerSeq[0][-i:]
tm1=mt.Tm_Wallace(mutPrimer,strict=False)
homodG1=primer3.calcHomodimer(mutPrimer).dg
hairpindG1=primer3.calcHairpin(mutPrimer).dg
for z in range(primerLen-primerLenDev,primerLen+primerLenDev+1):
wtPrimer=wtPrimerSeq[-z:]
tm2=mt.Tm_Wallace(wtPrimer,strict=False)
homodG2=primer3.calcHomodimer(wtPrimer).dg
hairpindG2=primer3.calcHairpin(wtPrimer).dg
for j in range(ampliconLen-ampliconLenDev,ampliconLen+ampliconLenDev+1):
rPrimerStart=seq.index(wtPrimer[:-2])+j
if rPrimerStart>len(seq):
print('ERROR! The length of input sequence is not enough for construction of R-primer!')
print('Input sequence length:',len(seq))
print('Position of substitution:',seq.index(wtPrimer)-len(wtPrimer))
print('Maximal amplicon length:',ampliconLen+ampliconLenDev)
exit(0)
for k in range(primerLen-primerLenDev,primerLen+primerLenDev+1):
rPrimer=revComplement(seq[rPrimerStart-k:rPrimerStart+1])
tm3=mt.Tm_Wallace(rPrimer,strict=False)
try:
homodG3=primer3.calcHomodimer(rPrimer).dg
except OSError:
homodG3=0
try:
heterodG1=primer3.calcHeterodimer(rPrimer,mutPrimer).dg
except OSError:
heterodG1=0
try:
heterodG2=primer3.calcHeterodimer(rPrimer,wtPrimer).dg
except OSError:
heterodG2=0
try:
hairpindG3=primer3.calcHairpin(rPrimer).dg
except OSError:
hairpindG3=0
primers.append([mutPrimer,wtPrimer,rPrimer])
wtPrimerStart=seq.index(wtPrimer[:-2])
mutPrimerStart=seq.index(mutPrimer[:-2])
primerProps['_'.join(primers[-1])]=[len(mutPrimer),len(wtPrimer),len(rPrimer),j,tm1,tm2,tm3,homodG1,homodG2,homodG3,hairpindG1,hairpindG2,hairpindG3,heterodG1,heterodG2,mutPrimerStart,wtPrimerStart,rPrimerStart-k]
if needMinAmpl:
primerScores['_'.join(primers[-1])]=j/2+20*(abs(tm1-meltTemp)+abs(tm2-meltTemp)+abs(tm3-meltTemp)+abs(tm1-tm2)+abs(tm2-tm3)+abs(tm1-tm3))+5*(min(homodG1,dimerdg)+min(homodG2,dimerdg)+min(homodG3,dimerdg)+min(heterodG1,dimerdg)+min(heterodG2,dimerdg))/(5*dimerdg)+(min(hairpindG1,dimerdg)+min(hairpindG2,dimerdg)+min(hairpindG3,dimerdg))/(3*dimerdg)
else:
primerScores['_'.join(primers[-1])]=20*(abs(tm1-meltTemp)+abs(tm2-meltTemp)+abs(tm3-meltTemp)+abs(tm1-tm2)+abs(tm2-tm3)+abs(tm1-tm3))+5*(min(homodG1,dimerdg)+min(homodG2,dimerdg)+min(homodG3,dimerdg)+min(heterodG1,dimerdg)+min(heterodG2,dimerdg))/(5*dimerdg)+(min(hairpindG1,dimerdg)+min(hairpindG2,dimerdg)+min(hairpindG3,dimerdg))/(3*dimerdg)
# We search for two groups of primers: 1) primers that match the parameters supplied by the user (bestMatch);
# 2) primers with the best possible score overall (theBest).
# We may not find the 1st group, but the 2nd one is always found.
theBest=None
bestMatch=None
for key,value in sorted(primerScores.items(),key=itemgetter(1),reverse=False):
if theBest is None:
theBest=[key,value,primerProps[key]]
if ((primerProps[key][4]<=meltTemp+meltTempDev and primerProps[key][5]<=meltTemp+meltTempDev and primerProps[key][6]<=meltTemp+meltTempDev
and primerProps[key][4]>=meltTemp-meltTempDev and primerProps[key][5]>=meltTemp-meltTempDev and primerProps[key][6]>=meltTemp-meltTempDev
and primerProps[key][7]>=dimerdg and primerProps[key][8]>=dimerdg and primerProps[key][9]>=dimerdg
and primerProps[key][10]>=dimerdg and primerProps[key][11]>=dimerdg and primerProps[key][12]>=dimerdg
and primerProps[key][13]>=dimerdg and primerProps[key][14]>=dimerdg) and bestMatch is None):
bestMatch=[key,value,primerProps[key]]
return(bestMatch,theBest)
def chooseBestPrimers(seq,seqNames,ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl):
if '[' in seq:
if ']' in seq:
lBracket='['
rBracket=']'
else:
print('#######\nERROR! There is incorrect designation of alternative alleles!\n'
'It should look like [A/G] or (A/G)\n#######')  # lBracket/rBracket are undefined in this branch
exit(0)
elif '(' in seq:
if ')' in seq:
lBracket='('
rBracket=')'
else:
print('#######\nERROR! There is incorrect designation of alternative alleles!\n'
'It should look like [A/G] or (A/G)\n#######')  # lBracket/rBracket are undefined in this branch
exit(0)
else:
print('#######\nERROR! There is incorrect designation of alternative alleles!\n'
'It should look like [A/G] or (A/G)\n#######')  # lBracket/rBracket are undefined in this branch
exit(0)
ref=seq[seq.find(lBracket)+1:seq.find('/')]
alt=seq[seq.find('/')+1:seq.find(rBracket)]
if len(ref)!=1 or len(alt)!=1:
print('#######\nERROR! There is incorrect designation of alternative alleles!\n'
'It should look like [A/G] or (A/G). But now it is ',seq[seq.find(lBracket):seq.find(rBracket)+1],'\n#######')
exit(0)
pos=seq.find(lBracket)
seqRef=seq.replace(seq[seq.find(lBracket):seq.find(rBracket)+1],ref)
seqAlt=seq.replace(seq[seq.find(lBracket):seq.find(rBracket)+1],alt)
maxVal=0
primerVals=[]
primerEnds=[]
# MAMA-primer on the plus strand for mutant allele
for key,item in mama[seqRef[pos-1:pos+1]][seqAlt[pos-1:pos+1]].items():
primerEnds.append([key,1])
primerVals.append(item)
# MAMA-primer on the minus strand for mutant allele
for key,item in mama[revComplement(seqRef[pos:pos+2])][revComplement(seqAlt[pos:pos+2])].items():
primerEnds.append([key,-1])
primerVals.append(item)
wtPrimerVals=[]
wtPrimerEnds=[]
# MAMA-primer on the plus strand for the wild-type allele
for key,item in mama[seqAlt[pos-1:pos+1]][seqRef[pos-1:pos+1]].items():
wtPrimerEnds.append([key,1])
wtPrimerVals.append(item)
# MAMA-primer on the minus strand for the wild-type allele
for key,item in mama[revComplement(seqAlt[pos:pos+2])][revComplement(seqRef[pos:pos+2])].items():
wtPrimerEnds.append([key,-1])
wtPrimerVals.append(item)
bestPrimers=[]
plus40=[]
wtPrimers=[]
bestMatchPrimers=[]
for pe,wtpe in zip(primerEnds,wtPrimerEnds):
if pe[1]>0: # if strand is plus
bestPrimers.append([seqRef[max(pos-primerLen-primerLenDev,0):pos-1]+pe[0],1])
wtPrimers.append(seqRef[max(pos-primerLen-primerLenDev,0):pos-1]+wtpe[0])
bestMatch,theBest=constructPrimers(bestPrimers[-1],wtPrimers[-1],seqRef,ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl)
bestMatchPrimers.append([bestMatch,theBest])
plus40.append(seqRef[pos+1:min(len(seqRef),pos+primerLen+primerLenDev+1)])
else: # if strand is minus
bestPrimers.append([revComplement(seqRef[pos+2:min(len(seqRef),pos+primerLen+primerLenDev)])+pe[0],-1])
wtPrimers.append(revComplement(seqRef[pos+2:min(len(seqRef),pos+primerLen+primerLenDev)])+wtpe[0])
bestMatch,theBest=constructPrimers(bestPrimers[-1],wtPrimers[-1],revComplement(seqRef),ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl)
# Because this is the minus strand, we should recalculate positions of primers
if bestMatch:
bestMatch[2][-3]=len(seqRef)-bestMatch[2][-3]-bestMatch[2][0]+1
bestMatch[2][-2]=len(seqRef)-bestMatch[2][-2]-bestMatch[2][1]+1
bestMatch[2][-1]=len(seqRef)-bestMatch[2][-1]-bestMatch[2][2]+1
theBest[2][-3]=len(seqRef)-theBest[2][-3]-theBest[2][0]+1
theBest[2][-2]=len(seqRef)-theBest[2][-2]-theBest[2][1]+1
theBest[2][-1]=len(seqRef)-theBest[2][-1]-theBest[2][2]+1
bestMatchPrimers.append([bestMatch,theBest])
plus40.append(revComplement(seqRef[max(pos-primerLen-primerLenDev,0):pos]))
results=[]
for i,bp in enumerate(bestPrimers):
if bp[1]>0:
if bestMatchPrimers[i][0]:
results.append([seqNames[-1],ref,alt,'+']+bestMatchPrimers[i][0][0].split('_')+[str(primerVals[i])]+bestMatchPrimers[i][0][2])
else:
results.append([seqNames[-1],ref,alt,'+']+bestMatchPrimers[i][1][0].split('_')+[str(primerVals[i])]+bestMatchPrimers[i][1][2])
else:
if bestMatchPrimers[i][0]:
results.append([seqNames[-1],revComplement(ref),revComplement(alt),'-']+bestMatchPrimers[i][0][0].split('_')+[str(primerVals[i])]+bestMatchPrimers[i][0][2])
else:
results.append([seqNames[-1],revComplement(ref),revComplement(alt),'-']+bestMatchPrimers[i][1][0].split('_')+[str(primerVals[i])]+bestMatchPrimers[i][1][2])
return(results)
mama={}
file=open(thisDir+'mamaPrimers.txt')
for string in file:
if string=='' or string=='\n': continue
cols=string.replace('\n','').replace('\r','').split('\t')
if cols[0] not in mama.keys():
mama[cols[0]]={}
if cols[1] not in mama[cols[0]].keys():
mama[cols[0]][cols[1]]={}
alts=cols[2].split('/')
for alt in alts:
mama[cols[0]][cols[1]][alt]=int(cols[3])
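# Based on the parsing above, mamaPrimers.txt is assumed to be tab-separated:
# reference 3'-dinucleotide, mutant 3'-dinucleotide, slash-separated candidate
# primer ends, and an integer discrimination value, e.g. (hypothetical line):
#   GA	GG	CA/TA	3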
# Section of input arguments
par=argparse.ArgumentParser(description='endChooser automatically constructs MAMA-primers')
par.add_argument('--fasta-file','-fa',dest='fastaFile',type=str,help='Fasta-file that contains sequence with variable position designated like [A/G] or (A/G)',required=True)
par.add_argument('--amplicon-length','-alen',dest='ampliconLen',type=int,help='Amplicon length (Default: 150 bp)',required=False,default=150)
par.add_argument('--amplicon-length-deviation','-alendev',dest='ampliconLenDev',type=int,help='Amplicon length deviation (Default: 10 bp)',required=False,default=10)
par.add_argument('--primer-length','-plen',dest='primerLen',type=int,help='Optimal primer length (Default: 20 bp)',required=False,default=20)
par.add_argument('--primer-length-deviation','-plendev',dest='primerLenDev',type=int,help='Optimal primer length deviation (Default: 4 bp)',required=False,default=4)
par.add_argument('--melting-temperature','-mtemp',dest='meltTemp',type=int,help='Optimal melting temperature (Default: 60 degrees Celsius)',required=False,default=60)
par.add_argument('--melting-temperature-deviation','-mtempdev',dest='meltTempDev',type=int,help='Optimal melting temperature deviation (Default: 5 degrees Celsius)',required=False,default=5)
par.add_argument('--dg','-dg',dest='dimerdg',type=int,help='Minimal dG of dimer and hairpin formation (Default: 3000 kcal/mol)',required=False,default=3000)
par.add_argument('--min-amplicon','-min',dest='needMinAmpl',action='store_true',help='use this parameter if you need amplicons with a minimal length')
par.add_argument('--blast-done','-bd',dest='blastDone',action='store_true',help='use this parameter if blast search of your amplicons has been already done by endChooser')
par.add_argument('--reference-file','-ref',dest='refFile',type=str,help='Fasta-file with the human reference genome sequence ucsc.hg19.fa. Use this parameter only if you want to check primers for covering SNPs from dbSNP',required=False)
args=par.parse_args()
fastaFile=args.fastaFile
ampliconLen=args.ampliconLen
ampliconLenDev=args.ampliconLenDev
primerLen=args.primerLen
primerLenDev=args.primerLenDev
meltTemp=args.meltTemp
meltTempDev=args.meltTempDev
dimerdg=args.dimerdg
needMinAmpl=args.needMinAmpl
blastDone=args.blastDone
refFile=args.refFile
try:
f=open(fastaFile)
except FileNotFoundError:
print('#######\nERROR! Input fasta-file was not found:',fastaFile,'\n#######')
exit(0)
faFile=open(fastaFile+'.for_blast.fa','w')
if blastDone:
blastResultFileName=faFile.name[:-2]+'blast_result.xls'
if not os.path.exists(blastResultFileName):
print('#######\nERROR! You have chosen that blast has been already done but file was not found:',blastResultFileName,'\n#######')
exit(0)
print('Reading input file...')
wbw=xls.Workbook(fastaFile+'.primers.xls')
wsw=wbw.add_worksheet('MAMA-primers')
wsw.write_row(0,0,['Sequence_Name','Ref','Alt','Strand','Best_Primer_for_Mutant','Best_Primer_for_WT','Best_Reverse_Primer',
'Discrimination_Value','Mutant_Primer_Len','WT_Primer_Len','Reverse_Primer_Len','Amplicon_Length',
'Mutant_Primer_Tm','WT_Primer_Tm','Reverse_Primer_Tm',
'Mutant_Primer_Homodimer','WT_Primer_Homodimer','Reverse_Primer_Homodimer',
'Mutant_Primer_Hairpin1','WT_Primer_Hairpin2','Reverse_Primer_Hairpin',
'Mutant_Rev_Primers_Heterodimer','WT_Rev_Primers_Heterodimer',
'SNPs_in_Mutant_Primer','SNPs_in_WT_Primer','SNPs_in_Reverse_Primer'])
seqNames=[]
print('Constructing primers...')
p=re.compile('([\(\[](\w)\/\w[\)\]])')
results=[]
text=f.read()
lines=text.split('\n')
seqTotalNum=text.count('>')
allWork=seqTotalNum
seqNum=0
showPercWork(seqNum,allWork)
for line in lines:
if '>' in line:
if len(seqNames)>0:
results.extend(chooseBestPrimers(seq,seqNames,ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl))
m=p.findall(seq)
newSeq=seq.replace(m[0][0],m[0][1])
faFile.write(newSeq+'\n')
seqNum+=1
showPercWork(seqNum,allWork)
seqNames.append(line.replace('> ','').replace('>','').replace('\n','').replace('\r',''))
faFile.write(line+'\n')
seq=''
else:
seq+=line.replace('\n','').replace('\r','').replace(' ','').replace('\t','').upper()
results.extend(chooseBestPrimers(seq,seqNames,ampliconLen,ampliconLenDev,primerLen,primerLenDev,meltTemp,meltTempDev,dimerdg,needMinAmpl))
m=p.findall(seq)
newSeq=seq.replace(m[0][0],m[0][1])
faFile.write(newSeq+'\n')
seqNum+=1
showPercWork(seqNum,allWork)
print()
faFile.close()
if refFile:
mv=myvariant.MyVariantInfo()
blastResultFileName=faFile.name[:-2]+'blast_result.xls'
seqsCoords={}
print('Searching amplicon sequences in the reference sequence with Blast...')
if not blastDone:
cmdResult=sp.check_output('blastn -query '+faFile.name+' -subject '+refFile+' -outfmt "6 qseqid sallseqid sstrand qstart qend sstart send pident qseq sseq qlen" -perc_identity 95 -qcov_hsp_perc 95 > '+blastResultFileName,shell=True)
file=open(blastResultFileName)
for string in file:
cols=string.replace('\n','').split('\t')
chrom=cols[1].replace('chr','')
start=int(cols[5])
end=int(cols[6])
qstart=int(cols[3])
qend=int(cols[4])
if cols[0] not in seqsCoords.keys():
seqsCoords[cols[0]]=[[chrom,start,end,qstart,qend]]
else:
seqsCoords[cols[0]].append([chrom,start,end,qstart,qend])
if len(seqsCoords.keys())==0:
print('ERROR! No sequences were found in the reference sequence:',refFile)
exit(0)
genomePoses={} # Contains information from dbSNP about genomic positions
print('Checking primers for covering SNPs from dbSNP...')
allWork=len(seqNames)
showPercWork(0,allWork)
for i,seqName in enumerate(seqNames):
if seqName not in seqsCoords.keys():
print('WARNING! No sequences were found in the reference sequence',refFile,'for the sequence',seqName)
for k,res in enumerate(results[2*i:2*i+2]):
wsw.write_row(2*i+k+1,0,res[:-3])
elif len(seqsCoords[seqName])>1:
print('WARNING! Input sequence',seqName,'has repeats in the reference sequence!')
for k,res in enumerate(results[2*i:2*i+2]):
wsw.write_row(2*i+k+1,0,res[:-3])
else:
for k,res in enumerate(results[2*i:2*i+2]):
allPoses={} # Contains information from dbSNP about positions of all primers in the input sequence
primerPoses=[[],[],[]]
primerSNPs=[]
for j,primerStart in enumerate(res[-3:]):
for pos in range(primerStart+1,primerStart+res[8]+1):
if pos not in allPoses.keys():
allPoses[pos]=[]
if seqsCoords[seqName][0][3]<=pos<=seqsCoords[seqName][0][4]:
if seqsCoords[seqName][0][0]+'_'+str(pos-seqsCoords[seqName][0][3]+seqsCoords[seqName][0][1]) in genomePoses.keys():
allPoses[pos]=genomePoses[seqsCoords[seqName][0][0]+'_'+str(pos-seqsCoords[seqName][0][3]+seqsCoords[seqName][0][1])][:]
for freq in allPoses[pos]:
if (res[3]=='-' and j<=1) or (res[3]=='+' and j==2):
ref=freq[:freq.index('>')]
alt=freq[freq.index('>')+1:freq.index('/')]
freq=freq.replace(ref+'>'+alt,revComplement(ref)+'>'+revComplement(alt))
primerPoses[j].append(str(pos-primerStart)+freq)
else:
primerPoses[j].append(str(pos-primerStart)+freq)
else:
mvRes=mv.query('dbsnp.chrom:'+seqsCoords[seqName][0][0]+' && dbsnp.hg19.start:'+str(pos-seqsCoords[seqName][0][3]+seqsCoords[seqName][0][1]),fields='dbsnp')
for hit in mvRes['hits']:
if 'gmaf' not in hit['dbsnp'].keys(): continue
alFreqs={}
for al in hit['dbsnp']['alleles']:
try:
alFreqs[al['allele']]=str(al['freq'])
except KeyError:
print('ERROR!',mvRes); wbw.close(); exit(0)
ref=hit['dbsnp']['ref']
alt=hit['dbsnp']['alt']
if (res[3]=='-' and j<=1) or (res[3]=='+' and j==2):
primerPoses[j].append(str(pos-primerStart)+revComplement(ref)+'>'+revComplement(alt)+':'+alFreqs[ref]+'/'+alFreqs[alt])
else:
primerPoses[j].append(str(pos-primerStart)+ref+'>'+alt+':'+alFreqs[ref]+'/'+alFreqs[alt])
allPoses[pos].append(ref+'>'+alt+':'+alFreqs[ref]+'/'+alFreqs[alt])
genomePoses[seqsCoords[seqName][0][0]+'_'+str(pos-seqsCoords[seqName][0][3]+seqsCoords[seqName][0][1])]=allPoses[pos][:]
else:
for freq in allPoses[pos]:
if (res[3]=='-' and j<=1) or (res[3]=='+' and j==2):
ref=freq[:freq.index('>')]
alt=freq[freq.index('>')+1:freq.index('/')]
freq=freq.replace(ref+'>'+alt,revComplement(ref)+'>'+revComplement(alt))
primerPoses[j].append(str(pos-primerStart)+freq)
else:
primerPoses[j].append(str(pos-primerStart)+freq)
primerSNPs.append(','.join(primerPoses[j]))
wsw.write_row(2*i+k+1,0,res[:-3]+primerSNPs)
showPercWork(i+1,allWork)
print()
else:
print('Writing to the result file...')
allWork=len(results)
for i,res in enumerate(results):
try:
wsw.write_row(i+1,0,res[:-3])
except Exception:
print('ERROR!',res)
wbw.close(); exit(0)
showPercWork(i+1,allWork)
print()
wbw.close()
print('Done')
|
aakechin/endChooser
|
endChooser.py
|
Python
|
gpl-3.0
| 21,528
|
[
"BLAST"
] |
5c723f576882b55be916088dc7b638a491a5ccb603c3e116197a0c8930d0f40e
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from six.moves import range
from multiprocessing.sharedctypes import SynchronizedArray
from multiprocessing import Process, Manager
from joblib import cpu_count
import numpy as np
import sys
import MDAnalysis as mda
from ...coordinates.memory import MemoryReader
class TriangularMatrix(object):
"""Triangular matrix class. This class is designed to provide a
memory-efficient representation of a triangular matrix that still behaves
as a square symmetric one. The class wraps a numpy.array object,
in which data are stored in row-major order. It also has a few additional
facilities to conveniently load/write a matrix from/to file. It can be
accessed using the [] and () operators, similarly to a normal numpy array.
"""
def __init__(self, size, metadata=None, loadfile=None):
"""Class constructor.
Parameters
----------
size : int / array_like
Size of the matrix (number of rows or columns). If an
array is provided instead, the size of the triangular matrix
will be calculated and the array copied as the matrix
elements. Otherwise, the matrix is just initialized to zero.
metadata : dict or None
Metadata dictionary. Used to generate the metadata attribute.
loadfile : str or None
Load the matrix from this file. All the attributes and data will
be determined by the matrix file itself (i.e. metadata will be
ignored); size has to be provided though.
"""
if isinstance(metadata, dict):
self.metadata = np.array(list(metadata.items()), dtype=object)
else:
self.metadata = metadata
self.size = size
if loadfile:
self.loadz(loadfile)
elif isinstance(size, int):
self.size = size
self._elements = np.zeros((size + 1) * size // 2, dtype=np.float64)
elif isinstance(size, SynchronizedArray):
self._elements = np.array(size.get_obj(), dtype=np.float64)
self.size = int((np.sqrt(1 + 8 * len(size)) - 1) / 2)
elif isinstance(size, np.ndarray):
self._elements = size
self.size = int((np.sqrt(1 + 8 * len(size)) - 1) / 2)
else:
raise TypeError
def __getitem__(self, args):
x, y = args
if x < y:
x, y = y, x
return self._elements[x * (x + 1) // 2 + y]
def __setitem__(self, args, val):
x, y = args
if x < y:
x, y = y, x
self._elements[x * (x + 1) // 2 + y] = val
def as_array(self):
"""Return standard numpy array equivalent"""
a = np.zeros((self.size, self.size))
a[np.tril_indices(self.size)] = self._elements
a[np.triu_indices(self.size)] = a.T[np.triu_indices(self.size)]
return a
def savez(self, fname):
"""Save matrix in the npz compressed numpy format. Save metadata and
data as well.
Parameters
----------
fname : str
Name of the file to be saved.
"""
np.savez(fname, elements=self._elements, metadata=self.metadata)
def loadz(self, fname):
"""Load matrix from the npz compressed numpy format.
Parameters
----------
fname : str
Name of the file to be loaded.
"""
loaded = np.load(fname)
if loaded['metadata'].shape != ():
if loaded['metadata']['number of frames'] != self.size:
raise TypeError
self.metadata = loaded['metadata']
else:
if self.size*(self.size-1)//2+self.size != len(loaded['elements']):
raise TypeError
self._elements = loaded['elements']
def __add__(self, scalar):
"""Add scalar to matrix elements.
Parameters
----------
scalar : float
Scalar to be added.
"""
newMatrix = self.__class__(self.size)
newMatrix._elements = self._elements + scalar
return newMatrix
def __iadd__(self, scalar):
"""Add scalar to matrix elements.
Parameters
----------
scalar : float
Scalar to be added.
"""
self._elements += scalar
return self
def __mul__(self, scalar):
"""Multiply with scalar.
Parameters
----------
scalar : float
Scalar to multiply with.
"""
newMatrix = self.__class__(self.size)
newMatrix._elements = self._elements * scalar
return newMatrix
def __imul__(self, scalar):
"""Multiply with scalar.
Parameters
----------
scalar : float
Scalar to multiply with.
"""
self._elements *= scalar
return self
__rmul__ = __mul__
def __str__(self):
return str(self.as_array())
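# A minimal usage sketch (illustrative only, not part of the original module):
# element (x, y) and (y, x) address the same storage slot.
#
#   m = TriangularMatrix(3)
#   m[2, 0] = 1.5
#   assert m[0, 2] == 1.5            # symmetric access
#   print(m.as_array())              # full 3x3 symmetric numpy array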
class ParallelCalculation(object):
"""
Generic parallel calculation class. Can use arbitrary functions,
arguments to functions and kwargs to functions.
Attributes
----------
n_jobs : int
Number of cores to be used for parallel calculation. If -1 use all
available cores.
function : callable object
Function to be run in parallel.
args : list of tuples
Each tuple contains the arguments that will be passed to
function(). This means that a call to function() is performed for
each tuple. function is called as function(\*args, \*\*kwargs). Runs
are distributed on the requested numbers of cores.
kwargs : list of dicts
Each dict contains the named arguments that will be passed to
function, similarly as described for the args attribute.
nruns : int
Number of runs to be performed. Must be equal to len(args) and
len(kwargs).
"""
def __init__(self, n_jobs, function, args=None, kwargs=None):
"""
Parameters
----------
n_jobs : int
Number of cores to be used for parallel calculation. If -1 use all
available cores.
function : object that supports __call__, as functions
function to be run in parallel.
args : list of tuples
Arguments for function; see the ParallelCalculation class
description.
kwargs : list of dicts or None
kwargs for function; see the ParallelCalculation
class description.
"""
# args[i] should be a list of args, one for each run
self.n_jobs = n_jobs
if self.n_jobs == -1:
self.n_jobs = cpu_count()
# Arguments should be present; check before len(args) is taken below
if args is None:
args = []
self.args = args
self.functions = function
if not hasattr(self.functions, '__iter__'):
self.functions = [self.functions]*len(args)
if len(self.functions) != len(args):
self.functions = self.functions[:]*(len(args)//len(self.functions))
# If kwargs are not present, use empty dicts
if kwargs:
self.kwargs = kwargs
else:
self.kwargs = [{} for i in self.args]
self.nruns = len(args)
def worker(self, q, results):
"""
Generic worker. Will run function with the prescribed args and kwargs.
Parameters
----------
q : multiprocessing.Manager.Queue object
work queue, from which the worker fetches arguments and
messages
results : multiprocessing.Manager.Queue object
results queue, where results are put after each calculation is
finished
"""
while True:
i = q.get()
if i == 'STOP':
return
results.put((i, self.functions[i](*self.args[i], **self.kwargs[i])))
def run(self):
"""
Run parallel calculation.
Returns
-------
results : tuple of ordered tuples (int, object)
int is the number of the calculation corresponding to a
certain argument in the args list, and object is the result of
corresponding calculation. For instance, in (3, output), output
is the return of function(\*args[3], \*\*kwargs[3]).
"""
results_list = []
if self.n_jobs == 1:
for i in range(self.nruns):
results_list.append((i, self.functions[i](*self.args[i],
**self.kwargs[i])))
else:
manager = Manager()
q = manager.Queue()
results = manager.Queue()
workers = [Process(target=self.worker, args=(q, results)) for i in
range(self.n_jobs)]
for i in range(self.nruns):
q.put(i)
for w in workers:
q.put('STOP')
for w in workers:
w.start()
for w in workers:
w.join()
results.put('STOP')
for i in iter(results.get, 'STOP'):
results_list.append(i)
return tuple(sorted(results_list, key=lambda x: x[0]))
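# A minimal usage sketch (illustrative only): evaluate pow(2, i) for i = 0..3
# on two cores; results come back ordered by run index.
#
#   pc = ParallelCalculation(2, pow, args=[(2, i) for i in range(4)])
#   pc.run()                         # ((0, 1), (1, 2), (2, 4), (3, 8))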
def trm_indices(a, b):
"""
Generate (i,j) indices of a triangular matrix, between elements a and b.
The matrix size is automatically determined from the number of elements.
For instance: trm_indices((0,0),(2,1)) yields (0,0) (1,0) (1,1) (2,0)
(2,1).
Parameters
----------
a : (int i, int j) tuple
starting matrix element.
b : (int i, int j) tuple
final matrix element.
"""
i, j = a
while i < b[0]:
if i == j:
yield (i, j)
j = 0
i += 1
else:
yield (i, j)
j += 1
while j <= b[1]:
yield (i, j)
j += 1
def trm_indices_nodiag(n):
"""generate (i,j) indeces of a triangular matrix of n rows (or columns),
without diagonal (e.g. no elements (0,0),(1,1),...,(n,n))
Parameters
----------
n : int
Matrix size
"""
for i in range(1, n):
for j in range(i):
yield (i, j)
def trm_indices_diag(n):
"""generate (i,j) indeces of a triangular matrix of n rows (or columns),
with diagonal
Parameters
----------
n : int
Matrix size
"""
for i in range(0, n):
for j in range(i + 1):
yield (i, j)
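# Quick checks of the generators above (illustrative):
#
#   list(trm_indices_nodiag(3))      # [(1, 0), (2, 0), (2, 1)]
#   list(trm_indices_diag(2))        # [(0, 0), (1, 0), (1, 1)]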
def merge_universes(universes):
"""
Merge list of universes into one
Parameters
----------
universes : list of Universe objects
Returns
-------
Universe object
"""
for universe in universes:
universe.transfer_to_memory()
return mda.Universe(
universes[0].filename,
np.concatenate(tuple([e.trajectory.timeseries(format='fac') for e in universes]),
axis=0),
format=MemoryReader)
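# A minimal usage sketch (hypothetical file names):
#
#   u1 = mda.Universe('topology.psf', 'traj1.dcd')
#   u2 = mda.Universe('topology.psf', 'traj2.dcd')
#   merged = merge_universes([u1, u2])   # frames of both trajectories in memory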
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/encore/utils.py
|
Python
|
gpl-2.0
| 12,064
|
[
"MDAnalysis"
] |
2b953548c8890f62f24cc51310da477145a9b6ca23da169af1195ea985c2a499
|
# coding=utf-8
# Girls' names are generated from lists of first names (girl type_first) and last names
# (girl type_last). If there is no last-name list, only the first-name list is used.
girls_names = {
'peasant_first': [
u'Жанна', u'Герда', u'Баббета', u'Cюзи', u'Альба', u'Амели', u'Аннета', u'Жоржетта', u'Бетти',
u'Бетси', u'Бланка', u'Бьянка', u'Дейзи', u'Джинни', u'Джуди', u'Дороти', u'Зои', u'Ирен', u'Ивет',
u'Колет', u'Криси', u'Кэтти', u'Кэт', u'Лили', u'Лиди', u'Лулу'
],
'citizen_first': [
u'Аделия', u'Аврора', u'Альбертина', u'Анджелла', u'Аврелия', u'Беатрис', u'Бернадетт',
u'Бриджит', u'Вероник', u'Виолет', u'Вирджиния', u'Габриэлла', u'Джаннет', u'Джулиана', u'Доминика',
u'Жаклина', u'Жозефина', u'Джульетта', u'Камилла', u'Каролина', u'Кэйтлин', u'Ирен', u'Мелисса', u'Марджори',
u'Натали', u'Пенелопа', u'Розали', u'Розета', u'Селеста', u'Симона', u'Стефани', u'Сюзанна',
u'Тереза', u'Флора', u'Эммануэль', u'Адалинда', u'Альбертина', u'Амелинда', u'Гризельда',
u'Виктория', u'Ирма', u'Каролина', u'Кристиана', u'Кэтрин', u'Лиона', u'Лорели', u'Маргарита', u'Франциска',
u'Хенелора', u'Хильда', u'Элеонора', u'Абигайль', u'Антония', u'Долорес', u'Доротея',
u'Женевьева', u'Жозефина', u'Инесс', u'Кармелита', u'Консуэлла', u'Летиция', u'Марселла', u'Присцилла',
u'Рамона', u'София', u'Ефимия', u'Ефания', u'Лидия', u'Беатриче',
],
'princess_first': [
u'Аннабель', u'Аделия', u'Авелин', u'Айседора', u'Альбертина', u'Анастасия', u'Антуанетта',
u'Беатрис', u'Валентина', u'Виктория', u'Габриэлла', u'Джиневра', u'Доминика', u'Джулианна',
u'Джульетта', u'Жюстина', u'Жозефина', u'Ивонна', u'Изабелла', u'Камилла', u'Клариса',
u'Клементина', u'Кристина', u'Лукреция', u'Марго', u'Матильда', u'Мелисента', u'Марианна', u'Олимпия',
u'Пенелопа', u'Розалинда', u'Розамунда', u'Селестина', u'Серафина', u'Сюзанна', u'Стефания', u'Тереза',
u'Флафия', u'Фелиция', u'Генриэтта', u'Гертруда', u'Шарлотта', u'Эмммануэль', u'Альбертина', u'Амелинда',
u'Брунгильда', u'Вильгельмина', u'Изольда', u'Рафаэлла', u'Амаранта', u'Дельфиния', u'Доротея',
u'Мерседес', u'Офелия',
],
'princess_last': [
u'дэ Мюзи', u'фон Баургафф', u'дэ Альбре', u'дэ Блуа', u'дэ Виржи', u'ди Гиз', u'дэ Бриенн',
u'дэ Колиньи', u'дэ Ла Тур', u'дэ Лузиньян', u'дэ Фуа', u'дэ Брисак', u'дэ Круа', u'дэ Лин',
u'дэ Кюлот', u'дэ Сен-При', u'фон Баттенберг', u'фон Беннгис', u'фон Вальбиц', u'фон Вительсбах',
u'фон Гогеншауфен', u'фон Зальф', u'фон Люденштафф', u'фон Мирбах', u'фон Розен', u'фон Церинген',
u'фон Грюнберг', u'фон Штюрберг', u'фон Шелленбург', u'Строцци', u'Сфорца', u'Альбици',
u'Барбариго', u'Пацци', u'Бранкаччо', u'да Верана', u'Висконти', u'Гримальди', u'да Полента', u'делла Тори',
u'да Камино', u'Монтрефельто', u'Манфреди', u'Фарнезе', u'Фрегозо', u'де Мендоза', u'ла Серда',
],
'elf_first': [
u'Берунвен', u'Фанавен', u'Арвен', u'Лучиэнь', u'Феалиндэ', u'Эстелендиль', u'Астера', u'Теолинвен',
u'Куивэн', u'Мрвэн', u'Интиальвен', u'Анарвен', u'Аманиэль', u'Анариэль', u'Лариэль', u'Лотанариэ',
u'Исильиндиль', u'Селфарианис', u'Йорингель', u'Оросинвиль', u'Гилэстель', u'Валакирэ'
],
'ogre_first': [
u'Хунн', u'Йорва', u'Дирга', u'Велга', u'Сига', u'Йалгуль', u'Дорба', u'Гирга', u'Давири', u'Шалга',
u'Орва', u'Дезра', u'Арга', u'Бигра', u'Варга', u'Енза', u'Зарта', u'Икла', u'Корда', u'Логаза',
u'Мирбу', u'Нира',
],
'mermaid_first': [
u'Ариэль', u'Блажена', u'Будимила', u'Ведана', u'Велина', u'Венцеслава', u'Верея', u'Велезара',
u'Веселина', u'Витана', u'Влада', u'Весемлиа', u'Годица', u'Горлина', u'Далина', u'Ждана',
u'Деяна', u'Дивина', u'Доляна', u'Есена', u'Жилена', u'Завида', u'Зоряна', u'Златина', u'Ивица',
u'Калёна', u'Красоя', u'Купава', u'Лада', u'Леля', u'Малиша', u'Млава', u'Милана', u'Младлена',
u'Мирана', u'Невена', u'Обрица', u'Пава', u'Пригода', u'Рада', u'Ракита', u'Ружана',
u'Силимина', u'Серебрина', u'Славена', u'Станимира', u'Стояна', u'Томила', u'Умила', u'Ундина',
u'Цветана', u'Чаруна', u'Янина', u'Яромила', u'Ясмания'
],
'siren_first': [
u'Ариэль', u'Блажена', u'Будимила', u'Ведана', u'Велина', u'Венцеслава', u'Верея', u'Велезара',
u'Веселина', u'Витана', u'Влада', u'Весемлиа', u'Годица', u'Горлина', u'Далина', u'Ждана', u'Деяна',
u'Дивина', u'Доляна', u'Есена', u'Жилена', u'Завида', u'Зоряна', u'Златина', u'Ивица', u'Калёна',
u'Красоя', u'Купава', u'Лада', u'Леля', u'Малиша', u'Млава', u'Милана', u'Младлена', u'Мирана',
u'Невена', u'Обрица', u'Пава', u'Пригода', u'Рада', u'Ракита', u'Ружана', u'Силимина', u'Серебрина',
u'Славена', u'Станимира', u'Стояна', u'Томила', u'Умила', u'Ундина', u'Цветана', u'Чаруна',
u'Янина', u'Яромила', u'Ясмания'
],
'ice_first': [
u'Астрид', u'Бригита', u'Боргильда', u'Вигдис', u'Вилла', u'Гурдун', u'Гунхильд', u'Дорта', u'Ингрид',
u'Ингеборга', u'Йорнун', u'Матильда', u'Рангильда', u'Руна', u'Сигурд', u'Сванхильда', u'Сигюнд',
u'Ульрика', u'Фрида', u'Хлодвен', u'Хильда', u'Эрика'
],
'fire_first': [
u'Азиль', u'Азиза', u'Базайна', u'Багира', u'Будур', u'Бушра', u'Гюльчатай', u'Гуля', u'Гульнара',
u'Гулистан', u'Фируза', u'Фатима', u'Ясмин', u'Айгюль', u'Зульфия', u'Ламия', u'Лейла', u'Марьям',
u'Самира', u'Хурма',
u'Чинара', u'Эльмира'
],
'titan_first': [
u'Агата', u'Адонисия', u'Алексино', u'Амброзия', u'Антигона', u'Ариадна', u'Артемисия', u'Афродита',
u'Гликерия', u'Дельфиния', u'Деметра', u'Зиновия', u'Калисто', u'Калипсо', u'Кора', u'Ксения',
u'Медея', u'Мельпомена', u'Мнемозина', u'Немезида', u'Олимпия', u'Пандора', u'Персефона',
u'Таисия', u'Персея', u'Персея', u'Психея', u'Сапфо', u'Талия', u'Терпсихора', u'Фаломена',
u'Гаромония', u'Хрисеида', u'Эфимия', u'Юнона'
]
}
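# A minimal sketch of the generation rule described in the header comment
# (hypothetical helper, not part of the original module):
#
#   import random
#   def random_girl_name(girl_type):
#       first = random.choice(girls_names[girl_type + '_first'])
#       last_list = girls_names.get(girl_type + '_last')
#       if last_list:
#           return first + u' ' + random.choice(last_list)
#       return first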
# Information about all girl types
girls_info = {
'peasant': {
'magic_rating': 0, # magic rating
'regular_spawn': 'poisonous_asp', # identifier of the regular spawn
'advanced_spawn': 'basilisk', # identifier of the advanced spawn
'giantess': False, # whether she is a giantess
'avatar': 'peasant', # avatar
'description': u'селянка', # description for text output
't_count_min': 0, # minimum number of treasures
't_count_max': 2, # maximum number of treasures
't_price_min': 1, # minimum item price
't_price_max': 25, # maximum item price
't_alignment': 'human', # jewellery type
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'earring', 'necklace',
'pendant', 'ring', 'broch', 'armbrace', 'legbrace', 'fibula', 'farting'],
# list of possible treasure items
},
'citizen': {
'magic_rating': 0,
'regular_spawn': 'winged_asp',
'advanced_spawn': 'kobold',
'giantess': False,
'avatar': 'citizen',
'description': u'горожанка',
't_count_min': 0,
't_count_max': 4,
't_price_min': 25,
't_price_max': 100,
't_alignment': 'human',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'fibula', 'taller'],
},
'thief': {
'magic_rating': 0,
'regular_spawn': 'winged_asp',
'advanced_spawn': 'kobold',
'giantess': False,
'avatar': 'thief',
'description': u'воровка',
't_count_min': 2,
't_count_max': 5,
't_price_min': 25,
't_price_max': 250,
't_alignment': 'human',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'fibula', 'taller', 'dublon'],
},
'knight': {
'magic_rating': 1,
'regular_spawn': 'krokk',
'advanced_spawn': 'lizardman',
'giantess': False,
'avatar': 'knight',
'description': u'воительница',
't_count_min': 2,
't_count_max': 5,
't_price_min': 25,
't_price_max': 250,
't_alignment': 'knight',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'fibula', 'taller', 'dublon'],
},
'princess': {
'magic_rating': 0,
'regular_spawn': 'krokk',
'advanced_spawn': 'lizardman',
'giantess': False,
'avatar': 'princess',
'description': u'аристократка',
't_count_min': 2,
't_count_max': 5,
't_price_min': 100,
't_price_max': 1000,
't_alignment': 'knight',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'fibula'],
},
'elf': {
'magic_rating': 1,
'regular_spawn': 'gargoyle',
'advanced_spawn': 'dragonborn',
'giantess': False,
'avatar': 'elf',
'description': u'эльфийская дева',
't_count_min': 1,
't_count_max': 4,
't_price_min': 250,
't_price_max': 2000,
't_alignment': 'elf',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain'],
},
'mermaid': {
'magic_rating': 1,
'regular_spawn': 'octopus',
'advanced_spawn': 'sea_bastard',
'giantess': False,
'avatar': 'mermaid',
'description': u'русалка',
't_count_min': 0,
't_count_max': 4,
't_price_min': 10,
't_price_max': 200,
't_alignment': 'merman',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain'],
},
'ogre': {
'magic_rating': 2,
'regular_spawn': 'strigg',
'advanced_spawn': 'minotaur',
'giantess': True,
'avatar': 'ogre',
'description': u'людоедка',
't_count_min': 0,
't_count_max': 3,
't_price_min': 250,
't_price_max': 1500,
't_alignment': 'knight',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'fibula', 'farting', 'taller', 'dublon'],
},
'siren': {
'magic_rating': 2,
'regular_spawn': 'murloc',
'advanced_spawn': 'naga',
'giantess': True,
'avatar': 'mermaid',
'description': u'сирена',
't_count_min': 1,
't_count_max': 4,
't_price_min': 250,
't_price_max': 2000,
't_alignment': 'merman',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'taller', 'dublon'],
},
'ice': {
'magic_rating': 2,
'regular_spawn': 'ice_worm',
'advanced_spawn': 'yettie',
'giantess': True,
'avatar': 'ice',
'description': u'ледяная великанша',
't_count_min': 1,
't_count_max': 5,
't_price_min': 250,
't_price_max': 2500,
't_alignment': 'human',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'taller', 'dublon'],
},
'fire': {
'magic_rating': 2,
'regular_spawn': 'hell_hound',
'advanced_spawn': 'barlog',
'giantess': True,
'avatar': 'fire',
'description': u'огненная великанша',
't_count_min': 1,
't_count_max': 5,
't_price_min': 250,
't_price_max': 2500,
't_alignment': 'dwarf',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'taller', 'dublon'],
},
'titan': {
'magic_rating': 2,
'regular_spawn': 'chimera',
'advanced_spawn': 'troll',
'giantess': True,
'avatar': 'titan',
'description': u'титанида',
't_count_min': 3,
't_count_max': 6,
't_price_min': 500,
't_price_max': 5000,
't_alignment': 'elf',
't_list': [
'casket', 'statue', 'mirror', 'comb', 'phallos', 'band', 'diadem', 'tiara', 'earring',
'necklace', 'pendant', 'ring', 'broch', 'gemring', 'armbrace', 'legbrace', 'chain',
'taller', 'dublon'],
},
}
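# A minimal sketch of how the t_* fields could drive a treasure roll
# (hypothetical helper, not part of the original module):
#
#   import random
#   def roll_treasures(girl_type):
#       info = girls_info[girl_type]
#       count = random.randint(info['t_count_min'], info['t_count_max'])
#       return [(random.choice(info['t_list']),
#                random.randint(info['t_price_min'], info['t_price_max']))
#               for _ in range(count)]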
# Information about all spawn types
spawn_info = {
'goblin': {
'power': 1, # strength
'modifier': [], # possible roles
'name': u'Гоблин', # name
'born': u'Ошибка', # description at birth
},
'poisonous_asp': {
'power': 1, # strength
'modifier': ['poisonous'], # possible roles
'name': u'Ядовитый аспид', # name
'born': u'вылупившиеся из продолговатых мягких яиц ядовитые змеи ничем особенно не отличаются от болотных гадюк, разве что крупнее и агрессивнее. Вместо того чтобы прятаться в глухих местах, эти злобные ядовитые твари вечно ищут кого ужалить, будь то люди или домашний скот. От их токсина нет противоядия, а смерь медленная и крайне мучительная.', # Описание при рождении
},
'winged_asp': {
'power': 2,
'modifier': ['poisonous'],
'name': u'Крылатый аспид',
'born': u'эти крупные ядовитые змеи в отличие от обычных пресмыкающихся наделены унаследованными от драконьей крови крыльями. Одно дело случайно наступить на гадюку, у совсем другое когда она приземляется тебе на шею прямо с неба. Яд крылатых аспидов приводит к долгой и мучительной смерти, а кусаться они ох как любят.', # Описание при рождении
},
'krokk': {
'power': 1,
'modifier': ['servant'],
'name': u'Крокк',
'born': u'рождённые от благородной дамы эти существа превосходят тварей которых способна родить любая простолюдинка. Впрочем, несмотря на мощное телосложение и кое-какие мозги в охранники логова Кроккки не годятся. Они подслеповаты и вечно дремлют устроившись на солнышке или нырнув в грязь. Впрочем их можно заставить выполнять работу по дому или на строителсьстве.', # Описание при рождении
},
'basilisk': {
'power': 3,
'modifier': ['poisonous'],
'name': u'Василиск',
'born': u'это жуткого вида птенчики, с петушиными гребнями и змеиными хвостами. Хотя эти твари и безмозглые, они всё же намного опаснее обычных ядовитых аспидов, которых крестьянка могла бы родить от менее коварного дракона. Василиски, так же известные как кокатриксы, способны отравить человека просто поглядев ему в глаза и к тому же они летают хоть и неуклюже.', # Описание при рождении
},
'kobold': {
'power': 2,
'modifier': ['servant'],
'name': u'Кобольд',
'born': u'мелкие, дохлые и скрюченные кобольды это всё что способна произвести на свет обычная женщина даже от очень могучего драконьего семени. Тем не менее эти драконоподобные гуманоиды обладают достаточным интеллектом чтобы выполнять работу по хозяйству. В бою они примерно равны обычным гоблинам, но ещё и трусливы до ужаса, так что ставить их на охрану логова было бы опрометчиво.', # Описание при рождении
},
'lizardman': {
'power': 3,
'modifier': ['warrior'],
'name': u'Ящерик',
'born': u'сочетание могучего драконьего семени и чистой благородной крови дало лучших отродий которых только способна родить смертная женщина. Взрослый ящерик куда крупнее и сильнее обычного человека, покрыт прочной чешуёй и не чувствителен к боли. Рептилоиды быстры, наблюдательны и достаточно умны чтобы стать отличными воинами. Они так же любят строить коварные планы по тайному захвату мира, но тут им придётся встать в очередь за своей бабушкой - Владычицей.', # Описание при рождении
},
'dragonborn': {
'power': 3,
'modifier': ['elite'],
'name': u'Драконорождённый',
'born': u'драконорождённый потомок Aein Sidhe, сочетает в себе силу и ярость драконьего рода с колдовским могуществом старшей крови детей богини Дану. Его сила размеры и интеллект далеко превосходят возможности не только людей но и альвов с цвергами. Драконорождённый вполне может померяться силой с великаном и станет отличным элитным стражем сокровищ или воином армии тьмы.', # Описание при рождении
},
'gargoyle': {
'power': 4,
'modifier': ['warrior'],
'name': u'Гаргуйль',
'born': u'драконьему семени не хватило потенциала чтобы полностью раскрыть возможности магической крови детей Дану, но даже такие уродливые горгулии будут полезны в армии тьмы или на охранной службе. Способность к полёту даёт им превосходство над обычными рептилодами, даже не говоря о гоблинах и людях.', # Описание при рождении
},
'sea_bastard': {
'power': 3,
'modifier': ['poisonous', 'marine'],
'name': u'Рыбоглаз',
'born': u'в сочетании с могучим драконьем семенем кровь морского народа дала жуткую пародию на русалку - рыбоглаза. Эти злобные и уродливые твари способны жить лишь в морской воде и это единственное что мешает им стать дополнением к воинству тьмы что собирается в пустошах под рукой Владычицы. К тому же они слишком прожорливы и легко отвлекаются от караульной службы чтобы забить косяк рыб. Но всё же можно принудить их служить в морском логове.', # Описание при рождении
},
'octopus': {
'power': 5,
'modifier': ['poisonous', 'marine'],
'name': u'Ядовитый спрут',
'born': u'похожие на здоровенных лиловых осьминогов, эти безмозглые морские твари отличаются агрессивыным нравом и наличием яда в присосках. Неудачливыми ныряльщикам не поздоровится.', # Описание при рождении
},
'hell_hound': {
'power': 4,
'modifier': ['poisonous'],
'name': u'Адская гончая',
'born': u'Семя дракона сильно пострадало от раскалённой матки огненной великанши. Из яиц на свет появились мутировавшие многоголовые твари напоминающие помесь собаки, ящерицы и газовой горелки. Они слишком дики и тупы для армейской службы, зато способны навести ужас на обжитые земли.', # Описание при рождении
},
'minotaur': {
'power': 5,
'modifier': ['elite'],
'name': u'Минотавр',
'born': u'Семя коварного даркона отлично раскрыло потенциал дикости и ярости в крови людоедки. Рогатый, мохнатый, склонный к припадкам ярости минотавр способен в одиночку разметать отряд тяжелой панцирной пехоты и перетрахать всех женщин деревни за одну ночь. Всё же он достаточно умён чтобы служить высшей силе, так что из него выйдёт неплохой страж сокровищницы или элитный боец.', # Описание при рождении
},
'murloc': {
'power': 3,
'modifier': ['warrior', 'marine'],
'name': u'Мурлок',
'born': u'Жутко искажённые пародии сразу на людей, лягушек и рыб, мурлоки стали бы неплохими воинами в армии тьмы, если бы могли жить вдали от воды. Но так, максимум на что они способны - охранять от посягательств подводные логова драконов или терроризировать морской народ.', # Описание при рождении
},
'naga': {
'power': 6,
'modifier': ['elite', 'marine'],
'name': u'Нага',
'born': u'коварство дракона позволило породить от сирены огромную и могучую тварь именуемую Наг (змей). Наг сочетает в себе качества человека и морской змеи, но кроме того он обладает великанским размером, невероятной силой и живучестью. Он мог мы стать элитным бойцом армии тьмы, если бы не пересыхал на суше. Впрочем из него получится отличный страж сокровищницы. ', # Описание при рождении
},
'ice_worm': {
'power': 7,
'modifier': ['poisonous'],
'name': u'Ледяной червь',
'born': u'слишком слабое для ледяной великанши семя, сделало потомство неразумным. Это змеи. Огроменные, уродливые, с холодными как лёд панцирями и жуткими пастями змеи. Человека такая прожуёт не задумываясь, но для службы в армии ей не хватит мозгов. ', # Описание при рождении
},
'yettie': {
'power': 6,
'modifier': ['elite'],
'name': u'Йетти',
'born': u'Продуктом союза дракона и ледяной великанши стал мохнатый, рогатый великан больше похожий на обезьяну чем на разумное существо. И тем не менее, он хоть и дик но весьма умён. Йётти может стать отличным элитным бойцом в армии тьмы.', # Описание при рождении
},
'troll': {
'power': 8,
'modifier': ['elite'],
'name': u'Тролль',
'born': u'Тролль - самое могучее из отродий драконов, которое только может появиться на свет без вмешательства Владычицы. Он крупнее и сильнее чем титан. Практически неуязвим и достаточно умён чтобы служить в воинстве тьмы. А ещё он зелёный, толстый и любит когда его кормят.', # Описание при рождении
},
'strigg': {
'power': 6,
'modifier': ['poisonous'],
'name': u'Стригой',
'born': u'рождение можно считать неудачным. Семя дракона оказалось жидковатым для людоедки и в итоге на свет родились жуткие крылатые уродцы, лишенные каких либо мозгов. Стриги разумеется агрессивны и даже ядовиты, но слишком тупы для армейской службы. ', # Описание при рождении
},
'barlog': {
'power': 6,
'modifier': ['elite'],
'name': u'Дэв',
'born': u'драконьему семени хватило потенциала чтобы грамотно слиться с огненной сущностью великанши. Результатом стал огромный, пеперёк себя шире Дэв. Он не только чудовищно силён, но к тому же обладает властью над огнём. Это просто великолепный элитный воин для армии тьмы.', # Описание при рождении
},
'chimera': {
'power': 10,
'modifier': ['poisonous'],
'name': u'Химера',
'born': u'магическая сущность титаниды с трудом слилась с драконьей кровью, породив уродливую хищную химеру. Хотя эта многоголовая, агрессивная и ядовитая тварь способна разорвать в прямом бою даже великана, она не обладает даже зачатками разума. Служить в армии ей не суждено.', # Описание при рождении
},
}
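# Quick lookups against the table above (values as defined there):
#
#   spawn_info['troll']['power']         # 8
#   spawn_info['naga']['modifier']       # ['elite', 'marine']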
girl_events = {
'escape': 'lb_event_girl_escape', # "escape from captivity" event
'spawn': 'lb_event_girl_spawn', # "birth of spawn" event
'free_spawn': 'lb_event_girl_free_spawn', # "birth of spawn in the wild" event
'hunger_death': 'lb_event_girl_hunger_death', # "girl dies of hunger" event
'kill': 'lb_event_girl_kill', # "pregnant girl is killed while free" event
}
girls_texts = {
# Substitutions:
# %(dragon_name)s = short name of the current dragon
# %(dragon_name_full)s = dragon's name with epithet
# %(dragon_type)s = dragon's anatomy type (serpent, lindworm, etc.)
# %(girl_name)s = name of the current woman (quite a pun, that :) )
# %(girl_title)s = woman's type (peasant, townswoman, lady, mermaid, elven maiden, etc.)
# %(spawn_name)s - spawn type for birth descriptions (starts with a capital letter)
# %(rob_list)s - list of stolen loot
'girl': { # used when there is no suitable text or the required girl type is missing
'shout': ( # the girl's reaction, in direct speech
u"Ой, а мне текст не написали (((",
),
'prelude': ( # descriptions of the foreplay
u"Одним неуловимым движением %(dragon_name)s подобрался вплотную к женщине и сбил её с "
u"ног, а затем начал рвать зубами её одежду словно остервенелый пёс. %(girl_name)s "
u"отчаянно отбивалась и кричала, но толку от этого было не много, изодранная одежда "
u"разлетелась клочками оставляя её полностью обнаженной и беззащитной перед охваченным "
u"похотью ящером.",
),
'sex': ( # description of sex with the girl
u"Отчаянно пытаясь спасти свою невинность, %(girl_title)s закрылась руками но %(dragon_type)s "
u"предпринял обходной манёвр. Широко разинув свою зубастую пасть он обхватил голову девушки "
u"челюстями, так что всё её лицо оказалось внутри, лишаясь доступа к воздуху. Девушка широко "
u"открыла рот пытаясь вдохнуть хоть немного кислорода, но вместо этого в её глотку проник "
u"длинный раздвоенный язык ящера. Теперь все когда все силы девушки были направлены на то "
u"чтобы оторвать смрадную пасть от своего лица она и думать забыла о невинности. Скребя "
u"ногтями по твёрдой чешуе дракона и дрыгая ногами %(girl_name)s внезапно почувствовала как "
u"снизу в неё проникает что-то большое и твёрдое. Покрытый слизью рептилоидный член с "
u"лёгкостью прорвал тонкую плёнку защищавшую вход в тугое молодое влагалище, безжалостно "
u"растягивая и продавливая всё на своём пути. Почти теряя сознание от боли и недостатка "
u"воздуха, %(girl_name)s внезапно почувствовала что челюсти насильника размыкаются, вновь "
u"позволяя ей вдохнуть. %(dragon_name)s хотел насладиться её воплями и плачем.",
),
'impregnate': ( # impregnation
u"Сдавленная в безжалостных объятьях ящера, %(girl_title)s почувствовала как он "
u"ускоряет темп своих движений. Боль стала практически невыносимой но крик девушки "
u"потерялся, перекрытый рёвом наслаждения насильника. Конвульсивно содрагаясь всем "
u"телом %(dragon_type)s вливал в истерзанное лоно девушки целые литры липкого и "
u"густого семени, заставляя её маленький животик раздуться изнутри. Когда "
u"%(dragon_name)s наконец отстранился от своей жертвы из неё вытек целый водопад "
u"семени, но тем не менее количества оставшегося внутри было более чем достаточно "
u"чтобы гарантировать надёжное оплодотворение. Дело было сделано надёжно.",
),
'new': ( # description of a new girl
u"%(girl_name)s - %(girl_title)s.",
),
'free': ( # description of releasing her into the wild
u"Пусть сама заботится о себе. Если её не убьют свои же, узнав что за отродье растёт в её чреве...",
),
'free_prison': ( # description of releasing her from the dungeon
u"Незачем держать её взаперти, охранять ещё... пусть катится на все четыре стороны.",
),
'steal': ( # description of stealing the girl
u"%(dragon_name)s относит пленницу в своё логово...",
),
'jail': ( # description of locking her in the dungeon
u"...и сажает её под замок.",
),
'jailed': ( # description of returning her to the dungeon
u"%(dragon_name)s возвращает девушку в темницу.",
),
'eat': ( # description of eating the girl. Shame on him, the treacherous worm.
u"Ты меня съешь?",
),
'rob': ( # description of robbing the girl.
u"%(dragon_name)s грабит девушку и получает: \n %(rob_list)s.",
),
'traps': ( # description of an escape attempt ending in death in the traps.
u"%(girl_name)s убегает из темницы и гибнет в ловушках.",
),
'escape': ( # description of a successful escape
u"%(girl_name)s спасается бегством",
),
'spawn_common': ( # description of the birth
u"%(girl_name)s откладывает яйца из которых под наблюдением слуг вылупятся новые отродья. \n %(spawn_name)s.",
),
'spawn_elite': ( # description of the birth
u"%(girl_name)s в мучениях откладывает огромное яйцо с толстой чешуйчатой скорлупой. \n %(spawn_name)s.",
),
'anguish': ( # description of death from anguish
u"%(girl_name)s умирает в тоске.",
),
'hunger_death': ( # description of death from hunger
u"%(girl_name)s умирает от голода.",
),
'kill': ( # description of death at the villagers' hands
u"Люди узнают, что %(girl_name)s беременна от дракона и убивают её.",
),
'free_spawn': ( # description of giving birth in the wild
u"%(girl_name)s в тайне от людей откладывает яйца из которых вылупляются кровожадные монстры... Теперь они будут резвиться на воле, терроризируя округу и возможно сожрут собственную мать.",
),
'prison': ( # visiting the girl in prison
u"%(girl_name)s находится в заключении.",
),
},
'peasant': { # used for peasant girls
'new': ( # description of a peasant girl
u"Сельская девица по имени %(girl_name)s.",
),
'shout': ( # the girl's reaction, in direct speech
u"Ой, божечки!..",
u"Ай мамочка!..",
u"Ты куда языком своим слюнявым тычешь змеюка поганая?!",
u"Ой-ой-ой, только не ешь меня пожалуйста...",
u"Ай. Нет-нет-нет, только не туда... ох...",
u"Драконьчик, миленький, я всё сделаю тебе, только не кушай меня пожалуйста!",
u"Ты что собрался делать этим елдаком, бесстыдник?! Да он не влезет же, ящерица смердячая! Ааааааааай...",
u"Ай, что ты делаешь?! Больно... нет, пожалуйста... такой то здоровенный... уууй больно же!!!",
u"Ишь что удумал, чудище. Пусти... ай, падла... пусти говорят тебе.",
u"Неужто правду бабы говорят что драконы девок портат? Ой, не рычи. Понялая я, поняла. Не кусайся только.",
u"Что, люба я тебе змей? Ишь елдаком махает как пастух погонялом!",
u"Ох пресвятая дева, срамота то какая...",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
u"Ой, ну не надо драконьчик, меня же маменька убьёт если узнает что я от тебя понесла. Может я ручками тебя там поглажу?",
),
'eat': ( # description of eating the girl.
u"Ой, божечки!..",
u"Ай мамочка!..",
u"Неееееет!...",
u"Аааааааа!....",
u"Ой не рычи так, мне страшно...",
u"Ну и зубищи у тебя... ай нет-нет-нет...",
u"Oh shi~",
u"Не жри меня,... пожалуйста, я всё сделаю, только не жри!",
u"Спаси-ите! Лю-юди!",
u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!",
u"Я описилась...",
u"Ой какой взгляд у тебя голодный...",
u"Нет. Фу. Брысь. Ай не кусай меня.",
u"Пошел вон скотина! А ну ка брысь-кому говорят. Облизывается он, ишь ты!",
u"(сдавленно хрипит)",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
),
},
'citizen': { # used for townswomen
'new': ( # description of a townswoman
u"%(girl_name)s, дочь богача.",
),
'shout': ( # the girl's reaction, in direct speech
u"О, Господи!..",
u"Проклятая гадина!",
u"Не смей! Мой отец тебя на шашлык за такое пустит, змеюка!",
u"Прошу вас, господин дракон, не надо. Отпустите меня, умоляю...",
u"Ай. Нет-нет-нет, только не туда... ох...",
u"Только не надо зубов, я всё сделаю. Умоляю. Я же знаю чего вы хотите.",
u"Ой нет, убеерите эту... это... от меня. Стыд то какой!",
u"Ай, что вы делаете?! Больно... нет, умоляю... он же огромный... уууй больно же!!!",
u"Ты что задумал, отродье Ехидны?! Пусти... ай, тварь... пусти говорят тебе.",
u"Я слышала что драконы делают с девушками... Нет. пожалуйста не надо рычать. Я понимаю. Нет, не рвите я сниму... вот снимаю...",
u"Ох, Господи, я такого срама даже у коня в деревне не видала! Жуть то какая...",
u"Ох пресвятая дева, спаси и сохрани...",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
u"Зачем вы сдираете с меня платье? Нет, я не могу. У меня же жених... Это свершенно не... ааааАХ!",
),
'eat': ( # description of eating the girl.
u"(молится) Отец наш небесный, да святится имя твоё, да пребудет воля твоя...",
u"(молится) Если я пойду и долиною смертной тени, не убоюсь зла, потому что Ты со мной...",
u"Неееееет!...",
u"Аааааааа!....",
u"(кашляет от исходящего изо рта дракона смрада)",
u"Ну и зубищи у вас... ай нет-нет-нет...",
u"Oh shi~",
u"Не кушайте меня,... умоляю, я всё сделаю, только не ешьте!",
u"Спаси-ите! Помогите! Кто-ниб... аааа....",
u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!",
u"Нет, пожалуйста... я куплю вам целое стадо свиней... зачем меня то??",
u"Ох этот алчный взгляд...",
u"Нет. Фу. Брысь. Плохой дракон! Сидеть! Кому сказала сидеть!!!.",
u"Пошел вон скотина! А ну ка брысь-кому говорят. Облизывается он, ишь ты!",
u"(сдавленно хрипит)",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
),
},
'princess': { # used for noble ladies
'new': ( # description
u"%(girl_name)s, дама благородных кровей.",
),
'shout': ( # the girl's reaction, in direct speech
u"О, Господи!..",
u"Не тронь меня бесовское исчадие!",
u"Не смей! Мой отец тебя на шашлык за такое пустит, змеюка!",
u"Некоторые сичтают драконов благородными животными. Может вы будете так добры и перестанете распускать свои лапы и язык?",
u"Ай. Нет-нет-нет, только не туда... ох...",
u"Только не надо зубов, я всё сделаю. Умоляю. Я же знаю чего вы хотите.",
u"Ой нет, убеерите эту... это... от меня. Стыд то какой!",
u"Ай, что вы делаете?! Больно... нет, умоляю... он же огромный... уууй больно же!!!",
u"Ты что задумал, отродье Ехидны?! Пусти... ай, тварь... пусти говорят тебе.",
u"Я слышала что драконы делают с девушками... Нет. пожалуйста не надо рычать. Я понимаю. Нет, не рвите я сниму... вот снимаю...",
u"Ох, Господи, я такого срама даже у коня в деревне не видала! Жуть то какая...",
u"Ох пресвятая дева, спаси и сохрани...",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
u"Зачем вы сдираете с меня платье? Нет, я не могу. У меня же жених... Это свершенно не... ааааАХ!",
),
'eat': ( # description of eating the girl.
u"(молится) Pater noster, qui es in caelis, sanctificetur nomen tuum. Adveniat regnum tuum. Fiat voluntas tua,..",
u"(молится) Nam etsi ambulavero in medio umbrae mortis, non timebo mala, quoniam tu mecum es. Virga tua, et baculus tuus,..",
u"Неееееет!...",
u"Аааааааа!....",
u"(кашляет от исходящего изо рта дракона смрада)",
u"Ну и зубищи у вас... ай нет-нет-нет...",
u"Oh shi~",
u"Не кушайте меня,... умоляю, я всё сделаю, только не ешьте!",
u"Спаси-ите! Помогите! Кто-ниб... аааа....",
u"Сожрать меня вздумал, уродина?! Чтобы ты подавился!",
u"Нет, пожалуйста... я куплю вам целое стадо свиней... зачем меня то??",
u"Ох этот алчный взгляд...",
u"Нет. Фу. Брысь. Плохой дракон! Сидеть! Кому сказала сидеть?!!.",
u"Пошел вон скотина! А ну ка брысь-кому говорят. Облизывается он, ишь ты!",
u"(сдавленно хрипит)",
u"(тихонько плачет и закрывает лицо руками)",
u"(яростно отбивается и пыхтит сквозь сжатые зубы)",
),
},
'elf': { # used for forest maidens
'new': ( # description of the maiden
u"%(girl_name)s, прекрасная лесная дева из народа альвов, детей богини Дану.",
),
'shout': ( # the girl's reaction, in direct speech
u"О, Дану!..",
u"Не тронь меня исчадие скверны!",
u"Не смей! Духи леса отомсят за мою поргуанную честь!",
u"Уебери от меня эту... этот... Такой союз противен природе!",
u"Чем я заслужила такое унижение?!",
u"Ты можешь взять моё тело, но моей душой тебе не завладеть!",
),
'eat': ( # description of eating the girl
u"Неееееет!...",
u"Аааааааа!....",
u"Если хочешь чтобы я просила пощады - не надейся!",
u"(кашляет от исходящего изо рта дракона смрада)",
),
},
'mermaid': { # used for mermaids
'new': ( # description of the mermaid
u"%(girl_name)s, экзотическая морская дева.",
),
'shout': ( # the girl's reaction, in direct speech
u"О, Дагон!..",
u"Не тронь меня сухопутная ящерица!",
u"Не смей! Духи вод отомсят за мою поргуанную честь!",
u"Что это за хрень у тебя между ног?! Щупальце???",
),
'eat': ( # description of eating the girl
u"Неееееет!...",
u"Аааааааа!....",
),
},
'siren': { # used for sirens
'new': ( # description
u"%(girl_name)s, экзотическая морская великанша.",
),
'shout': ( # the girl's reaction, in direct speech
u"О, Дагон!..",
u"Не тронь меня сухопутная ящерица!",
u"Не смей! Духи вод отомсят за мою поргуанную честь!",
u"Что это за хрень у тебя между ног?! Щупальце???",
),
'eat': ( # description of eating the girl
u"Неееееет!...",
u"Аааааааа!....",
),
},
'ogre': { # ogress
'new': ( # description
u"%(girl_name)s, глупая и диковатая людоедка.",
),
'shout': ( # the girl's reaction, in direct speech
u"Твоя меня не выебать! Моя сама выебать твоя!!! АРррргх! Смерть через СНУ-СНУ!",
),
'eat': ( # description of eating the girl
u"Большая ящерица кусать? Я тоже кусать! КТО БОЛЬШЕ ОТКУСИТ?!.",
),
},
'ice': { # ice giantess
'new': ( # description
u"%(girl_name)s, холодная и надменная ледяная великашна.",
),
'shout': ( # the girl's reaction, in direct speech
u"Хочешь моих обьятий, змей? Твоя чешуя покроется инеем, а стручок скукожится от стужи в моих чреслах. Дерзай...",
),
'eat': ( # description of eating the girl
u"Ашшшшь... Я отморожу твои ничтожные кишки!..",
),
},
'fire': { # fire giantess
'new': ( # description
u"%(girl_name)s, темпераментная и страстная огненная великанша.",
),
'shout': ( # the girl's reaction, in direct speech
u"Ха! Поглядим какой из тебя любовник, змеюка. Хоть два раунда то выдержишь?",
),
'eat': ( # description of eating the girl
u"Решил меня сожрать? Без боя я не дамся!!!",
),
},
'titan': { # titaness
'new': ( # description
u"%(girl_name)s, совершенная и величественная титанида.",
),
'shout': ( # the girl's reaction, in direct speech
u"Повелеваю тебе оставить грязные мысли! Ты не достоин моей любви, червь!",
),
'eat': ( # description of eating the girl
u"О Боги, почему вы оставляете меня в смертый час?! Или я не ваша возлюбленная дщерь?",
),
},
}
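# A minimal sketch of applying the substitutions documented at the top of
# girls_texts (hypothetical dragon name):
#
#   import random
#   line = random.choice(girls_texts['girl']['steal'])
#   print(line % {'dragon_name': u'Horrokh'})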
|
OldHuntsman/DefilerWings
|
game/pythoncode/girls_data.py
|
Python
|
bsd-3-clause
| 58,860
|
[
"Octopus"
] |
4d1b4a7a89e111ace955eee4e91c25d39ef286d3b82b6ae0fc7b16469034947a
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import os
import sys
import time
import subprocess
if len(sys.argv) not in [5, 6, 7, 8, 9, 10]:
print("""Usage: %s input_file logfile doperltest top_srcdir doreaptest alt_output_file alt_psi4_exe alt_psi4datadir""" % (sys.argv[0]))
sys.exit(1)
# extract run condition from arguments
python_exec = sys.argv[0]
infile = sys.argv[1]
logfile = sys.argv[2]
psiautotest = sys.argv[3]
top_srcdir = sys.argv[4]
sowreap = sys.argv[5]
if len(sys.argv) >= 7:
outfile = sys.argv[6]
else:
outfile = 'output.dat'
if len(sys.argv) >= 8:
psi = sys.argv[7]
else:
psi = '../../bin/psi4'
if len(sys.argv) >= 9:
psidatadir = sys.argv[8]
else:
psidatadir = os.path.dirname(os.path.realpath(psi)) + '/../share/psi4'
if len(sys.argv) >= 10:
psilibdir = sys.argv[9] + os.path.sep
else:
psilibdir = os.path.abspath('/../')
# open logfile and print test case header
try:
loghandle = open(logfile, 'a')
except IOError as e:
print("""I can't write to %s: %s""" % (logfile, e))
loghandle.write("""\n%s\n%s\n""" % (os.path.dirname(infile).split(os.sep)[-1], time.strftime("%Y-%m-%d %H:%M")))
def backtick(exelist):
"""Executes the command-argument list in *exelist*, directing the
standard output to screen and file logfile and string p4out. Returns
the system status of the call.
"""
try:
retcode = subprocess.Popen(exelist, bufsize=0, stdout=subprocess.PIPE, universal_newlines=True)
except OSError as e:
sys.stderr.write('Command %s execution failed: %s\n' % (exelist, e.strerror))
sys.exit(1)
p4out = ''
while True:
data = retcode.stdout.readline()
if not data:
break
sys.stdout.write(data) # screen
loghandle.write(data) # file
loghandle.flush()
p4out += data # string
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
return exstat
time.sleep(0.1)
loghandle.close()
# Not sure why the 2nd while loop is needed, as the 1st has always
# been adequate for driver interfaces. Nevertheless, the 2nd loop is
# necessary to collect the proper exit code.
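# A minimal usage sketch (hypothetical command): stream the child's stdout to
# screen and logfile, and get back its exit status.
#
#   status = backtick(['echo', 'hello'])   # prints/logs 'hello', returns 0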
# run psi4 and collect testing status from any compare_* in input file
if os.path.isfile(infile):
exelist = [psi, infile, outfile, '-l', psidatadir]
# On Windows set Python interpreter explicitly as the shebang is ignored
if sys.platform.startswith('win'):
exelist = [sys.executable] + exelist
pyexitcode = backtick(exelist)
elif os.path.isfile(infile.replace(".dat", ".py")):
infile = infile.replace(".dat", ".py")
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] += os.pathsep + psilibdir
else:
os.environ["PYTHONPATH"] = psilibdir
outfile = os.path.dirname(infile) + os.path.sep + outfile
pyexitcode = backtick(["python", infile, " > ", outfile])
else:
raise Exception("\n\nError: Input file %s not found\n" % infile)
if sowreap == 'true':
try:
retcode = subprocess.Popen([sys.executable, '%s/tests/reap.py' %
(top_srcdir), infile, outfile, logfile, psi, psidatadir])
except OSError as e:
print("""Can't find reap script: %s """ % (e))
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
reapexitcode = exstat
break
time.sleep(0.1)
else:
reapexitcode = None
# additionally invoke autotest script comparing output.dat to output.ref
if psiautotest == 'true':
os.environ['SRCDIR'] = os.path.dirname(infile)
try:
retcode = subprocess.Popen(['perl', '%s/tests/psitest.pl' % (top_srcdir), infile, logfile])
except OSError as e:
print("""Can't find psitest script: %s""" % (e))
while True:
retcode.poll()
exstat = retcode.returncode
if exstat is not None:
plexitcode = exstat
break
time.sleep(0.1)
else:
plexitcode = None
# combine, print, and return (0/1) testing status
exitcode = 0 if (pyexitcode == 0 and (plexitcode is None or plexitcode == 0) and (reapexitcode is None or reapexitcode == 0)) else 1
print('Exit Status: infile (', pyexitcode, '); autotest (', plexitcode, '); sowreap (', reapexitcode, '); overall (', exitcode, ')')
sys.exit(exitcode)
|
lothian/psi4
|
tests/runtest.py
|
Python
|
lgpl-3.0
| 5,306
|
[
"Psi4"
] |
a278547ea2831dc440b9a60947b332aab17b7953774c7c6204265e5784f17a81
|